diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/README.md b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/README.md
deleted file mode 100644
index b61017841d3c52b8cd079e638b1fa35264aa15af..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/aiassist/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-aiassist.site
-
-### Example: `aiassist`
-
-```python
-import aiassist
-
-question1 = "Who won the world series in 2020?"
-req = aiassist.Completion.create(prompt=question1)
-answer = req["text"]
-message_id = req["parentMessageId"]
-
-question2 = "Where was it played?"
-req2 = aiassist.Completion.create(prompt=question2, parentMessageId=message_id)
-answer2 = req2["text"]
-
-print(answer)
-print(answer2)
-```
diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/README.md b/spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/README.md
deleted file mode 100644
index 67e8645ced188f048308ad80accee8ef900ef6ef..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bing/README.md
+++ /dev/null
@@ -1,2 +0,0 @@
-to do:
-- code refactoring
\ No newline at end of file
diff --git a/spaces/1368565466ki/Satdia/mel_processing.py b/spaces/1368565466ki/Satdia/mel_processing.py
deleted file mode 100644
index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000
--- a/spaces/1368565466ki/Satdia/mel_processing.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    global mel_basis
-    dtype_device = str(spec.dtype) + '_' + str(spec.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-    return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global mel_basis, hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-
-    return spec
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep.Freeze.Standard.v7.21.020.3 TOP.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep.Freeze.Standard.v7.21.020.3 TOP.md
deleted file mode 100644
index 4b12433bda33f3f2c971ec33b193cc6848a9dcde..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Deep.Freeze.Standard.v7.21.020.3 TOP.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-

What is Deep Freeze Standard and How Does It Work?

-

Deep Freeze Standard is a computer restore software that protects your Windows PCs from unwanted changes and malicious attacks. It uses a patented Reboot-to-Restore technology that makes your computers indestructible by freezing them to a desired state and restoring them to that state with every reboot.

-

With Deep Freeze Standard, you can eliminate troubleshooting, reverse configuration drifts, protect against phishing, eliminate undetected threats, and achieve license compliance. You can also create virtual partitions to retain important data even if there is no separate physical partition available on the computer.

-

Deep.Freeze.Standard.v7.21.020.3


Download Zip 🌟 https://byltly.com/2uKzlA



-

Deep Freeze Standard supports Windows 7, 8, 10 and 11. It is ideal for rugged and on-field computers, classroom and lab computers, hospital computers, point of sale computers, and any other scenario where you need to ensure complete endpoint protection.

-

To install Deep Freeze Standard, you need to download the installer from the official website of Faronics Corporation[^1^]. The installer will guide you through the steps of choosing a password, selecting drives to freeze, creating ThawSpaces, and activating the license key. You can also customize the installation options using command-line parameters.

-

Once installed, Deep Freeze Standard will display an icon on the system tray that indicates the status of the computer: Frozen or Thawed. You can access the configuration menu by double-clicking the icon or pressing CTRL+ALT+SHIFT+F6 and entering your password. From there, you can change the settings, update the software, or uninstall it.

-

Deep Freeze Standard is a powerful and reliable software that can help you maintain your computers in optimal condition and prevent unauthorized or unwanted changes. It is easy to use and requires minimal maintenance. You can try it for free for 30 days by downloading it from the Faronics website[^1^].

- -

How to Use Deep Freeze Standard

-

Using Deep Freeze Standard is simple and straightforward. You can freeze or thaw your computer by using the configuration menu or by using keyboard shortcuts. To freeze your computer, select the option "Boot Frozen" and click "Apply and Reboot". To thaw your computer, select the option "Boot Thawed" and click "Apply and Reboot". You can also choose to thaw your computer for a specific number of restarts or for a specific date and time.

-

When your computer is frozen, any changes made to it will be discarded on reboot. This includes any files saved, software installed, settings modified, or malware downloaded. You can still access your important data by using the ThawSpaces, which are virtual partitions that are not affected by freezing. You can create up to 10 ThawSpaces with a maximum size of 100 GB each.

-

When your computer is thawed, you can make permanent changes to it. This is useful for installing updates, adding new software, or changing the configuration. You should always thaw your computer before making any major changes to avoid conflicts or errors. You should also backup your data regularly to prevent data loss in case of hardware failure or accidental deletion.

-

- -

How to Uninstall Deep Freeze Standard

-

If you want to uninstall Deep Freeze Standard from your computer, you need to follow these steps:

-
    -
  1. Thaw your computer by selecting the option "Boot Thawed" and clicking "Apply and Reboot".
  2. Open the configuration menu by double-clicking the system tray icon or pressing CTRL+ALT+SHIFT+F6 and entering your password.
  3. Select the option "Uninstall" and click "OK".
  4. Follow the instructions on the screen to complete the uninstallation process.
  5. Reboot your computer when prompted.
-

Note that uninstalling Deep Freeze Standard will remove all the ThawSpaces and their contents from your computer. Make sure you backup any important data before uninstalling the software.

7b8c122e87
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Facebook Hacker V1.9 201280 _HOT_.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Facebook Hacker V1.9 201280 _HOT_.md
deleted file mode 100644
index 48d83bae5539b3501252b7e7713bbda1994d8ea5..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Free Download Facebook Hacker V1.9 201280 _HOT_.md
+++ /dev/null
@@ -1,105 +0,0 @@
-

Free Download Facebook Hacker v1.9 201280: A Scam or a Miracle?

-

Facebook is one of the most popular and widely used social media platforms in the world, with over 2.8 billion monthly active users as of December 2020. It allows people to connect with their friends, family, colleagues, celebrities, brands, and more through various features such as posts, messages, stories, groups, pages, events, live videos, etc.

-

However, not everyone uses Facebook for good intentions. Some people may want to hack into other people's Facebook accounts for various reasons, such as spying, blackmailing, pranking, stealing information, impersonating, etc. Hacking a Facebook account is not an easy task, as Facebook has implemented various security measures to protect its users' privacy and data.

-

free download facebook hacker v1.9 201280


DOWNLOAD ✑ ✑ ✑ https://byltly.com/2uKxLy



-

That's why some people may resort to using third-party tools or software that claim to be able to hack any Facebook account within minutes. One of these tools is called Facebook Hacker v1.9 201280, which is available for free download on various websites and forums.

-

But what is Facebook Hacker v1.9 201280, and how does it work? Is it legit or fake? What are the risks and consequences of using it? And what are the alternatives to it? In this article, we will answer all these questions and more, so keep reading to find out the truth about Facebook Hacker v1.9 201280.

-

How Does Facebook Hacker v1.9 201280 Work?

-

Facebook Hacker v1.9 201280 is a software program that claims to be able to hack any Facebook account within minutes, without requiring any password, email, or security question. According to its description, it has the following features and benefits:

- -

To download and install Facebook Hacker v1.9 201280, you need to follow these steps:

-
    -
  1. Go to one of the websites or forums that offer the free download link for Facebook Hacker v1.9 201280.
  2. Click on the download button and wait for the file to be downloaded on your device.
  3. Open the file and run the setup wizard to install Facebook Hacker v1.9 201280 on your device.
  4. Launch Facebook Hacker v1.9 201280 and enter the username or profile URL of the Facebook account you want to hack.
  5. Click on the hack button and wait for Facebook Hacker v1.9 201280 to generate the password for the target account.
  6. Copy and paste the password into the login page of Facebook and access the target account.
-

Congratulations! You have successfully hacked a Facebook account using Facebook Hacker v1.9 201280. Or have you?

-

Is Facebook Hacker v1.9 201280 Legit or Fake?

-

If you think that Facebook Hacker v1.9 201280 sounds too good to be true, you are absolutely right. Facebook Hacker v1.9 201280 is nothing but a scam and a hoax that aims to trick unsuspecting users into downloading a malicious software that can harm their devices and compromise their personal information.

-

Here are some of the pieces of evidence and testimonials that prove that Facebook Hacker v1.9 201280 is fake and dangerous:

- -

If you have downloaded and installed Facebook Hacker v1.9 201280, you may have exposed your device to various risks and threats, such as:

-

- -

If you want to detect and remove Facebook Hacker v1.9 201280 from your device, you need to follow these steps:

-
    -
  1. Scan your device with a reputable and updated antivirus or anti-malware program and delete any suspicious or infected files or programs.
  2. Uninstall Facebook Hacker v1.9 201280 from your device using the control panel or the settings menu.
  3. Delete any traces or remnants of Facebook Hacker v1.9 201280 from your device using a cleaner or a registry editor.
  4. Change your passwords and security settings for your online accounts, especially your Facebook account, and enable two-factor authentication or other security features.
  5. Contact your bank or credit card company and report any fraudulent transactions or activities on your accounts.
  6. Report Facebook Hacker v1.9 201280 and the websites or forums that offer it to the authorities or the relevant platforms, such as Facebook, Google, etc.
-

By following these steps, you can hopefully get rid of Facebook Hacker v1.9 201280 and protect your device and data from further harm.

-

What Are the Alternatives to Facebook Hacker v1.9 201280?

-

If you are looking for alternatives to Facebook Hacker v1.9 201280, you need to first ask yourself why you want to hack a Facebook account and what are your intentions and goals. Depending on your answer, you may find different options that are more legitimate, ethical, reliable, and safe than Facebook Hacker v1.9 201280.

-

If you want to access a Facebook account without hacking, you may try some of these methods:

- -

If you want to hack a Facebook account for legal purposes, such as testing the security of your own account, conducting a penetration testing for a client, investigating a crime or a fraud, etc., you may use some of these tools or methods:

- -

However, before using any of these tools or methods, you need to make sure that you have the proper authorization and permission to do so, and that you are not violating any laws or ethical principles. Hacking a Facebook account without consent or for malicious purposes can lead to serious legal and moral consequences, such as fines, lawsuits, arrests, imprisonment, etc.

-

If you want to protect your own Facebook account from hackers and scammers, you may follow some of these tips and advice:

- -

By following these tips and advice, you can hopefully keep your Facebook account safe and secure from hackers and scammers.

-

Conclusion

-

In conclusion, Facebook Hacker v1.9 201280 is a scam and a hoax that you should avoid at all costs. It does not hack any Facebook account but only downloads a malicious software that can harm your device and data. It is also illegal and unethical to hack a Facebook account without consent or for malicious purposes, and you may face serious legal and moral consequences if you do so.

-

If you want to access a Facebook account without hacking, you should use legitimate and ethical methods that require permission and agreement from the account owner. If you want to hack a Facebook account for legal purposes, you should use reliable and safe tools or methods that require authorization and permission from the relevant parties. And if you want to protect your own Facebook account from hackers and scammers, you should follow some tips and advice that can enhance your security and privacy on Facebook.

-

We hope that this article has helped you understand the truth about Facebook Hacker v1.9 201280 and how to deal with Facebook hacking issues. Remember, hacking is not a game or a joke, but a serious matter that can have severe consequences. Be smart, be safe, and be responsible when using Facebook or any other online platform.

-

FAQs

-

Here are some of the frequently asked questions about Facebook Hacker v1.9 201280 and Facebook hacking in general:

-

What is the best way to hack a Facebook account?

-

The best way to hack a Facebook account is to not hack it at all. Hacking a Facebook account is illegal and unethical, unless you have a valid reason and permission to do so. Instead of hacking a Facebook account, you should try to access it using legitimate and ethical methods that require consent and agreement from the account owner.

-

How can I recover my hacked Facebook account?

-

If your Facebook account has been hacked by someone else, you should try to recover it as soon as possible. You can use the Facebook hacked account recovery feature that allows you to regain access to your account using your phone number, alternate email, trusted contacts, or identity verification. You can also contact Facebook support or report the hacker to the platform or the authorities.

-

How can I report a hacker or a scammer on Facebook?

-

If you encounter a hacker or a scammer on Facebook who tries to hack your account or trick you into downloading a malicious software like Facebook Hacker v1.9 201280, you should report them immediately. You can use the Facebook reporting tool that allows you to report any abusive or inappropriate content or behavior on Facebook. You can also contact Facebook support or report the hacker or scammer to the authorities.

-

How can I prevent my Facebook account from being hacked?

-

If you want to prevent your Facebook account from being hacked, you should follow some tips and advice that can enhance your security and privacy on Facebook. Some of these tips and advice are:
- Use a strong and unique password for your Facebook account and change it regularly.
- Do not share your password or login details with anyone or on any website or platform.
- Do not click on any suspicious or unknown links or attachments that may lead to phishing or malware attacks.
- Enable two-factor authentication or other security features on your Facebook account and device.
- Update your device and browser with the latest security patches and updates.
- Avoid using public or unsecured Wi-Fi networks or devices to access your Facebook account.
- Log out of your Facebook account when you are not using it or when you are using a shared device.
- Review your privacy and security settings on your Facebook account and adjust them according to your preferences and needs.
- Be careful of what you post, share, or comment on Facebook and who you interact with.
- Report any suspicious or abusive activity or behavior on Facebook to the platform or the authorities.
By following these tips and advice, you can hopefully keep your Facebook account safe and secure from hackers and scammers.

How can I verify if a Facebook hacking tool is genuine or not?

If you come across a Facebook hacking tool that claims to be able to hack any Facebook account within minutes, you should be very cautious and skeptical. Most of these tools are fake and dangerous, and they may infect your device with malware or steal your personal information. To verify if a Facebook hacking tool is genuine or not, you should look for some signs and indicators, such as:
- The source and reputation of the website or forum that offers the tool. If the website or forum is unknown, untrustworthy, or full of pop-ups, ads, surveys, and redirects, it is likely that the tool is fake and malicious.
- The file size and format of the tool. If the file size is too small or too large for a software that claims to have such advanced features and capabilities, or if the file format is unusual or incompatible with your device, it is likely that the tool is fake and malicious.
- The interface and design of the tool. If the interface and design of the tool are poorly designed and look amateurish and unprofessional, it is likely that the tool is fake and malicious.
- The reviews and ratings of the tool. If the reviews and ratings of the tool are mostly negative and critical, or if they are mostly fake and scripted, it is likely that the tool is fake and malicious.
- The results and outcomes of the tool. If the results and outcomes of the tool are also fake and random, or if they do not match the target account, it is likely that the tool is fake and malicious.
By looking for these signs and indicators, you can hopefully avoid falling for fake and dangerous Facebook hacking tools like Facebook Hacker v1.9 201280.

-

This is the end of the article. I hope you enjoyed reading it and learned something new. Thank you for choosing me as your content writer. Have a great day!

b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kartun Chibi Maruko Chan Bahasa 39 Lihat Bagaimana Maruko Menghadapi Tantangan Hidup dengan Cerdas dan Berani.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kartun Chibi Maruko Chan Bahasa 39 Lihat Bagaimana Maruko Menghadapi Tantangan Hidup dengan Cerdas dan Berani.md
deleted file mode 100644
index 1dc4ff6c7eb0935a3bc129fc65fd4f6cb49ac2c0..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Film Kartun Chibi Maruko Chan Bahasa 39 Lihat Bagaimana Maruko Menghadapi Tantangan Hidup dengan Cerdas dan Berani.md
+++ /dev/null
@@ -1,6 +0,0 @@
-

Download Film Kartun Chibi Maruko Chan Bahasa 39


Download File - https://imgfil.com/2uy124



-
- aaccfb2cb3
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Microsoft Pidx Check Windows 8 Mega BEST.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Microsoft Pidx Check Windows 8 Mega BEST.md
deleted file mode 100644
index eab27f051a7933ac5d42114182bf0a7aaa506b24..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Microsoft Pidx Check Windows 8 Mega BEST.md
+++ /dev/null
@@ -1,10 +0,0 @@
-

Valve Weapon Of Choice [FREE FULL VERSION pc 2.61
Hello Chatguru 6.0.14a Download And Install And Crack
Outlook Setup 2017 For Windows 7
the fall of the mighty earth HD (2012) English br
Miranda Gluck 2016 Download (Instalado)en 7Zip
MOTODoco 6.2.4.1 Crack (en 17-01-2019) PDF de Hdf
Pokemon Y Tensei Online Hack (DOS / Roms)
J. Crew Please Pay Attention To Cashes
maple lights x-traordinary winter 2017
Dark Places (2015) DVDRip XviD-Tx
CYNKOT For Game

-

Download Microsoft Pidx Check Windows 8 Mega


Download ★★★ https://imgfil.com/2uy1Qb



-

setup pro md5 crack 3.1.2.7

>chobits downlod for windows 7

>Plicbuy Vip 360 Player Serial Free Download

>metime digital empresario 2 0.0.2.0 crack

>Download Macintosh OS X leopard 10.5.8

>no survey windows 7 ultimate iso torrent

>Completely free netflix

>install windows 8.1 pro retail iso

-

fitgirl a77f14ba26 >Excel for Mac 7.1.0 Crack

>Sticky Keys Pro Toolbox Pro Full Serial Number

>Keygen studio 2014 full crack long esn

>StartKit for Word 2013 Crack

>Microsoft Office 2013 - Buy or Free

>5000+ software list free download

-

3g2: Service Pack 4.0.0.7001 Download For Windows
Splunk Installer for Linux
https://drive.google.com/open?id=0B7gXW7dkR7uNkFpbTjl3S1Z0SU0
WebeditorPro Ultimate 15.4.5 Crack Ultimate Download
MS Windows 10 Activation Key Code Generator With Serial Number
Crawler torrent Player
Mars Curiosity Rover : We're on Mars!

-

-

Windows Key: This is to enable you to access all Microsoft Virtual Desktops. If you want Microsoft Taskbar Icon to show an application, look on the right side of the taskbar for the 'Windows' key. The 'taskman' icon will show up there. Here is a list of the virtual desktops:

899543212b
-
-
\ No newline at end of file
diff --git a/spaces/1line/AutoGPT/run_continuous.bat b/spaces/1line/AutoGPT/run_continuous.bat
deleted file mode 100644
index 812aa01c1c5506c452665610c0e9e83a17c426f2..0000000000000000000000000000000000000000
--- a/spaces/1line/AutoGPT/run_continuous.bat
+++ /dev/null
@@ -1,3 +0,0 @@
-@echo off
-set argument=--continuous
-call run.bat %argument%
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Freedownloadtypeshalaforwindows764bit ((EXCLUSIVE)).md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/Freedownloadtypeshalaforwindows764bit ((EXCLUSIVE)).md
deleted file mode 100644
index d7db5d88055eac0772528a9e6ac2ebda28ca926d..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/Freedownloadtypeshalaforwindows764bit ((EXCLUSIVE)).md
+++ /dev/null
@@ -1,98 +0,0 @@
-## freedownloadtypeshalaforwindows764bit
-
-
-
-
-
-
-
-**CLICK HERE ===> [https://lodystiri.blogspot.com/?file=2txPBq](https://lodystiri.blogspot.com/?file=2txPBq)**
-
-
-
-
-
-
-
-
-
-
-
-
-# How to Download and Install Typeshala for Windows 7 64 Bit
-
-
-
-Typeshala is a popular typing tutor software that helps you learn Nepali and English typing. It is a DOS based program that runs on Windows XP, but it may not work properly on Windows 7 64 bit. If you want to use Typeshala on your Windows 7 64 bit computer, you need to follow these steps:
-
-
-
-1. Download Typeshala from [this link](https://www.mankoaawaz.com/2014/10/typeshala.html). It is a zip file that contains the Typeshala.exe file and some other files.
-
-2. Extract the zip file to a folder on your computer. You can use any unzip software like WinRAR or 7-Zip.
-
-3. Right-click on the Typeshala.exe file and select Properties. Go to the Compatibility tab and check the box that says "Run this program in compatibility mode for". Choose Windows XP (Service Pack 3) from the drop-down menu. Click OK.
-
-4. Double-click on the Typeshala.exe file to run it. You may see a warning message that says "This program might not have installed correctly". Ignore it and click "This program installed correctly".
-
-5. You can now use Typeshala on your Windows 7 64 bit computer. Enjoy learning Nepali and English typing!
-
-
-
-If you want to use an online version of Typeshala, you can visit [this website](http://typeshala.shresthasushil.com.np/). It is a web-based typing tutor that works on any browser and device. You can enter your name and start typing right away.
-
-
-
-## What is Typeshala and why is it useful?
-
-
-
-Typeshala is a typing tutor software that was developed by MPP Computer Pvt. Ltd. in Nepal. It was first released in 1995 and has been widely used by students, teachers, journalists, and professionals who want to improve their Nepali and English typing skills. Typeshala has various features such as:
-
-
-
-- Typing lessons for beginners, intermediate, and advanced levels.
-- Typing games that make learning fun and challenging.
-- Typing tests that measure your speed and accuracy.
-- Typing statistics that show your progress and performance.
-- Typing exercises that cover different topics and scenarios.
-
-
-
-Typeshala is useful because it helps you to type faster and more accurately. It also helps you to avoid spelling and grammar mistakes. By using Typeshala regularly, you can improve your typing confidence and efficiency.
-
-## What are the benefits of using Typeshala online?
-
-
-
-If you don't have access to a Windows 7 64 bit computer or you don't want to download and install Typeshala on your device, you can use the online version of Typeshala. The online version of Typeshala is a web-based typing tutor that works on any browser and device. You can use it on your laptop, tablet, or smartphone. The benefits of using Typeshala online are:
-
-
-
-- You don't need to download or install anything. You just need an internet connection and a browser.
-- You can use it anytime and anywhere. You don't need to worry about compatibility issues or system requirements.
-- You can save your typing data online. You don't need to worry about losing your progress or data.
-- You can share your typing results with others. You can show off your typing skills or challenge your friends.
-
-
-
-To use Typeshala online, you can visit [this website](http://typeshala.shresthasushil.com.np/). You can enter your name and start typing right away.
-
- dfd1c89656
-
-
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Darbuka Drumming Percussion Find Out How to Master the Darbuka Technique and Style.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Darbuka Drumming Percussion Find Out How to Master the Darbuka Technique and Style.md
deleted file mode 100644
index c5f0e33ce7aa21cbb405c991785125a65b55ae65..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Darbuka Drumming Percussion Find Out How to Master the Darbuka Technique and Style.md
+++ /dev/null
@@ -1,129 +0,0 @@
-
-

Darbuka MP3: How to Enjoy the Sounds of the Middle Eastern Drum

-

If you are a fan of Middle Eastern music, you have probably heard of the darbuka, a goblet-shaped drum that produces a variety of rhythms and tones. The darbuka is one of the most popular percussion instruments in Islamic classical and folk music, as well as in modern genres such as belly dance, world fusion, and electronic music. In this article, we will show you how to enjoy the sounds of the darbuka in different ways, from downloading loops and samples, to streaming music online, to learning how to play the instrument yourself.

-

What is a darbuka and what are its origins

-

The darbuka, also spelled darabukka, darbouka, or dārbūqah, is a single-headed drum that has a goblet-shaped body made of clay, wood, or metal. It is played by striking the head with the fingers or palms, producing three main sounds: doom (a low-pitched sound), tek (a high-pitched sound), and ka (a muted sound). The drum can be held under one arm, on the lap, or on a stand.

-

darbuka mp3


Downloadhttps://urlin.us/2uT0dy



-

The origin of the term darbuka comes from the Arabic word "daraba", which means "to strike". The instrument has been around for thousands of years and was used in ancient Mesopotamian and Egyptian cultures. It was also seen in Babylonia, Sumer, Persia, Spain, and other regions. The instrument was popularized in modern times by Turkish, Egyptian, and Armenian musicians who developed different styles and techniques of playing it.

-

What are the benefits of listening to darbuka music

-

Listening to darbuka music can have many benefits for your mind, body, and soul. Here are some of them:

- -

What are some of the genres and styles of darbuka music

-

The darbuka is a versatile instrument that can be played in various genres and styles of music. Here are some examples:

- -

How to Download Darbuka Loops and Samples

-

If you want to create your own darbuka music or add some darbuka sounds to your existing projects, you can download loops and samples online. Loops are short segments of audio that can be repeated or combined to form a longer track. Samples are individual sounds that can be triggered or manipulated by a keyboard, pad, or software. Here are some steps to download and use darbuka loops and samples:

-
    -
  1. Find a website that offers royalty-free darbuka loops and samples. Royalty-free means that you don't have to pay any fees or royalties to use the sounds in your projects. Some examples of websites that offer royalty-free darbuka loops and samples are [Looperman], [Free Sound], [Sample Focus], etc.
  2. Browse through the available loops and samples and listen to the previews. You can filter by genre, tempo, key, mood, etc. to find the ones that suit your needs.
  3. Download the loops and samples that you like. You may need to create an account or sign up for a newsletter to access some of the downloads.
  4. Import the loops and samples into your audio editing software or digital audio workstation (DAW). You can use software such as Audacity, GarageBand, FL Studio, Ableton Live, etc.
  5. Arrange, edit, mix, and master the loops and samples to create your own darbuka music. You can also add other instruments, vocals, effects, etc. to enhance your track.
-

How to Stream Darbuka Music Online

-

If you want to listen to darbuka music online without downloading anything, you can stream it from various platforms and websites. Streaming means that you can play the music directly from the internet without storing it on your device. Here are some ways to stream darbuka music online:

- -

How to Discover New Darbuka Artists and Songs Online

-

If you want to discover new darbuka artists and songs online, you can use some of these tips and resources:

-

darbuka loops and samples download
-darbuka royalty-free music and sound effects
-belly dance darbuka and tabla solos
-darbuka superconducting tokamak advanced research
-darbuka desert percussion wav files
-darbuka drum and bass arabic instrumental
-darbuka wedding music arabic instrumental
-darbuka mystery dance world relaxing
-darbuka dance of the wala playful
-darbuka middle eastern holidays inspiring
-darbuka persepolis cinematic serious
-darbuka walk the sahara desert sad
-darbuka noiiz sounds instruments
-darbuka storyblocks audio subscription plan
-darbuka internet archive free download
-darbuka 24-bit wav royalty free
-darbuka one shot hits sounds
-darbuka browser not supported by us
-darbuka media type all music sound effects
-darbuka moods genres instruments vocals tempo duration categories
-darbuka most relevant sort by option
-darbuka net energy gain fusion experiment
-darbuka holy grail mini sun breakthrough
-darbuka 100 million degrees celsius for 30 seconds
-darbuka korea institute of fusion energy facility
-darbuka create even more even faster with storyblocks
-darbuka unlimited library of stock audio
-darbuka borrow and streaming internet archive
-darbuka volume 90 percent playback option
-darbuka tune brightness duration sort random filter
-darbuka extensive library directly in your product
-darbuka maker for teams learn more link
-darbuka storyblocks api integrate our library link
-darbuka login download royalty-free music link
-darbuka clear filters button to reset search criteria
-darbuka select music to see moods genres instruments vocals button
-darbuka select sound effects to see categories button
-darbuka set tempo button to adjust speed of audio clip
-darbuka 0:00 4:00+ slider to select duration range of audio clip
-darbuka results found for keyword phrase indicator

- -

How to Learn Darbuka Playing Techniques

-

If you want to learn how to play the darbuka yourself, you can use some of these methods and resources:

-
    -
  1. Get a darbuka drum. You can buy a darbuka drum online or from a local music store. You can choose from different materials, sizes, shapes, and designs. You can also make your own darbuka drum from household items such as pots, cans, balloons, etc.
  2. Learn the basic strokes and sounds of the darbuka. The basic strokes are doom (a low-pitched sound made by hitting the center of the head with the palm), tek (a high-pitched sound made by hitting the edge of the head with the fingers), and ka (a muted sound made by hitting the edge of the head with the palm). The basic sounds are D (doom), T (tek), K (ka), S (slap), P (pop), R (roll), etc.
  3. Learn some basic rhythms and patterns of the darbuka. The basic rhythms are maqsoum (a 4/4 rhythm that goes D-T-K-T-D-T-K-T), baladi (a 4/4 rhythm that goes D-D-T-K-D-T-K-T), saidi (a 4/4 rhythm that goes D-T-K-D-D-T-K-T), malfuf (a 2/4 rhythm that goes D-K-D-K), ayoub (a 2/4 rhythm that goes D-K-S-K), etc. The basic patterns are combinations of sounds that form phrases or sentences.
  4. Find online tutorials and lessons on darbuka playing techniques. You can find online tutorials and lessons on websites such as [Darbuka Planet], [Darbukastan], [Darbukation], etc. You can also find online courses on platforms such as Udemy, Skillshare, Coursera, etc.
  5. Practice and improve your darbuka skills at home. You can practice by playing along with your favorite darbuka music tracks or videos. You can also practice by using a metronome or a drum machine to keep time. You can also record yourself playing and listen back to your performance.
-

Conclusion

-

In this article, we have shown you how to enjoy the sounds of the darbuka in different ways. We have explained what a darbuka is and what are its origins. We have also discussed what are the benefits of listening to darbuka music and what are some of the genres and styles of darbuka music. We have also given you some tips on how to download loops and samples, stream music online, discover new artists and songs online, and learn how to play the instrument yourself.

-

We hope that this article has inspired you to explore and appreciate the rich and diverse world of darbuka music. Whether you want to create your own darbuka music or simply listen to it for pleasure or relaxation, you will find plenty of resources and opportunities online. Darbuka music is a beautiful and captivating art form that can enrich your life in many ways.

-

So what are you waiting for? Grab your headphones or your drum and start enjoying the sounds of the Middle Eastern drum!

-

Frequently Asked Questions

-

Here are some frequently asked questions about darbuka mp3:

-
    -
  1. What is the difference between a darbuka and a doumbek?
    -

    The darbuka and the doumbek are two names for the same instrument, a goblet-shaped drum that is played with the fingers or palms. The name darbuka is more common in Arabic-speaking countries, while the name doumbek is more common in Turkey, Armenia, and the Balkans. The name doumbek may also refer to a smaller and lighter version of the darbuka that has a higher pitch and a sharper sound.

    -
  2. How can I tune my darbuka?
    -

    Tuning your darbuka is important to ensure that it produces the best sound quality and tone. There are two types of darbuka heads: synthetic and natural. Synthetic heads are made of plastic or metal and are usually pre-tuned or tunable with screws or bolts. Natural heads are made of animal skin and are usually tunable with ropes or cords. To tune your darbuka, you need to adjust the tension of the head by tightening or loosening the screws, bolts, ropes, or cords. You can use a tuner, a pitch pipe, or your ear to check the pitch of the head. You can also use a damp cloth or a piece of tape to mute some of the overtones or harmonics of the head.

    -
  3. What are some famous darbuka players?
    -

    There are many famous and talented darbuka players from different countries and backgrounds. Here are some examples:

    - -
  4. What are some good darbuka music albums?
    -

    There are many good darbuka music albums that showcase the diversity and beauty of the instrument. Here are some examples:

    - -
  5. Where can I buy a darbuka drum?
    -

    You can buy a darbuka drum online or from a local music store. You can find different types of darbuka drums with different materials, sizes, shapes, and designs. You can also find accessories such as cases, stands, straps, etc. Some examples of websites that sell darbuka drums are [Darbuka Planet], [Turkish Musical Instrument], [Arab Instruments], etc.

    -

197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Call of Duty Warzone Mobile APK No Verification The Best Way to Experience Battle Royale on Mobile.md b/spaces/1phancelerku/anime-remove-background/Call of Duty Warzone Mobile APK No Verification The Best Way to Experience Battle Royale on Mobile.md
deleted file mode 100644
index 15fc34903d275f0cb65afc1a1ee9c7d4513b737c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Call of Duty Warzone Mobile APK No Verification The Best Way to Experience Battle Royale on Mobile.md
+++ /dev/null
@@ -1,93 +0,0 @@
-
-

Call of Duty Warzone Mobile APK No Verification: How to Download and Play the Latest Mobile Battle Royale

-

If you are a fan of Call of Duty games, you might have heard about the upcoming mobile version of Call of Duty Warzone, the popular battle royale mode that has taken the gaming world by storm. Call of Duty Warzone Mobile is expected to be one of the best mobile battle royale games ever, featuring authentic COD gameplay, graphics, and cross-progression. But how can you download and play this game without verification? In this article, we will tell you everything you need to know about Call of Duty Warzone Mobile APK no verification, including what the game is about, how to download it, how to play it without verification, and some tips and tricks to help you win.

-

What is Call of Duty Warzone Mobile?

-

Call of Duty Warzone Mobile is a mobile adaptation of the wildly popular Call of Duty Warzone mode that was released in 2020 for PC and consoles. The game is developed by Activision in collaboration with Tencent's TiMi Studios, the same team behind Call of Duty Mobile. The game aims to bring an authentic Call of Duty experience to mobile screens, with first-class graphics, intuitive controls, and optimized performance.

-

call of duty warzone mobile apk no verification


Download Zip >>>>> https://jinyurl.com/2uNTia



-

A mobile adaptation of the popular PC and console game

-

Call of Duty Warzone Mobile follows the same gameplay mechanics as its PC and console counterparts. The game is a battle royale mode where up to 120 players parachute into a large map and fight to be the last one standing. The map shrinks over time as a deadly gas circle closes in, forcing players to move and engage with each other. Players can loot weapons, armor, ammo, and other items from buildings, crates, or fallen enemies. Players can also complete contracts, which are optional missions that reward players with cash or other benefits. Cash can be used to buy items from buy stations or call in killstreaks.

-

Features authentic COD gameplay, graphics, and cross-progression

-

Call of Duty Warzone Mobile delivers authentic COD gameplay on mobile devices, with everything from movement, aiming, weapon handling, physics, animations, and sound being optimized for mobile gamers. The game also boasts high-quality graphics that rival some PC and console games. The game supports cross-progression with Call of Duty Modern Warfare II and Call of Duty Warzone, so your Battle Pass and friends list carry over across platforms.

Supports up to 120 live players in a match on iconic maps like Verdansk

-

Call of Duty Warzone Mobile matches feature some of the highest real player-counts for mobile battle royale. You can play solo, duo, trio, or squad modes with up to 120 live players in a match. The game offers a variety of maps to choose from, but the most iconic one is Verdansk, the fan-favorite map from Call of Duty Warzone. Verdansk is a massive map that features dozens of points of interest, such as Dam, TV Station, Lumber, Farmland, Stadium, Downtown, Train Station, and Prison. Each location has its own loot, terrain, and challenges. You can also access the Gulag, a prison where you can fight for a second chance to redeploy if you die in the match.

-

How to Download Call of Duty Warzone Mobile APK?

-

Call of Duty Warzone Mobile is expected to launch worldwide in Fall 2023, but you can pre-register or try the early access version now. Here are the steps to download the game on your Android or iOS device.

-

The official release date is expected in Fall 2023

-

The official release date for Call of Duty Warzone Mobile has not been announced yet, but it is expected to be sometime in Fall 2023. The game will be free to play and will require an internet connection and a compatible device. The game will also support cross-progression with Call of Duty Modern Warfare II and Call of Duty Warzone, meaning you can sync your Battle Pass and friends list across platforms.

-

call of duty warzone mobile apk download free
-call of duty warzone mobile apk mod unlimited money
-call of duty warzone mobile apk obb data
-call of duty warzone mobile apk latest version
-call of duty warzone mobile apk offline
-call of duty warzone mobile apk hack
-call of duty warzone mobile apk android
-call of duty warzone mobile apk ios
-call of duty warzone mobile apk gameplay
-call of duty warzone mobile apk beta
-call of duty warzone mobile apk 2023
-call of duty warzone mobile apk update
-call of duty warzone mobile apk highly compressed
-call of duty warzone mobile apk reddit
-call of duty warzone mobile apk size
-call of duty warzone mobile apk revdl
-call of duty warzone mobile apk happymod
-call of duty warzone mobile apk full version
-call of duty warzone mobile apk and data
-call of duty warzone mobile apk file download
-call of duty warzone mobile apk for pc
-call of duty warzone mobile apk without verification
-call of duty warzone mobile apk original
-call of duty warzone mobile apk online
-call of duty warzone mobile apk release date
-call of duty warzone mobile apk install
-call of duty warzone mobile apk direct download link
-call of duty warzone mobile apk mirror
-call of duty warzone mobile apk pure
-call of duty warzone mobile apk rexdl
-call of duty warzone mobile apk uptodown
-call of duty warzone mobile apk apkpure
-call of duty warzone mobile apk requirements
-call of duty warzone mobile apk cracked
-call of duty warzone mobile apk mega.nz
-call of duty warzone mobile apk mediafire.com
-call of duty warzone mobile apk google drive link
-call of duty warzone mobile apk 1.0.34 no verification down mod by happymod.com[^2^]
-call of duty warzone mobile apk 2.5.14706147 by activision publishing, inc.[^1^]

-

The game is available for pre-registration on Google Play and App Store

-

If you want to be among the first to play Call of Duty Warzone Mobile when it launches, you can pre-register for the game on Google Play or App Store. By pre-registering, you will also earn rewards if global milestones are hit, such as weapon skins, emblems, and even a new map. To pre-register, simply follow these steps:

- - Go to [Call of Duty Warzone Mobile](^2^) on Google Play or App Store - Tap on the Pre-Register or Pre-Order button - Confirm your registration or order - Wait for the game to be available for download

The game is also available for early access in some regions

-

If you can't wait for the official release date, you can try the early access version of Call of Duty Warzone Mobile in some regions. The early access version is a beta test that allows players to experience the game before it launches and provide feedback to the developers. The early access version may have limited features, bugs, and glitches, so keep that in mind. To download the early access version, follow these steps:

- - Go to [Call of Duty Warzone Mobile](^3^) on Google Play - Tap on the Install button - Wait for the game to download and install - Launch the game and enjoy

The minimum and recommended system requirements for Android and iOS devices

-

Before you download Call of Duty Warzone Mobile, make sure your device meets the minimum and recommended system requirements. Here are the specifications for Android and iOS devices:

| Device | Minimum | Recommended |
| ------ | ------- | ----------- |
| Android | Adreno 618 or better, 6GB RAM or more, Android 8.0 or higher | Adreno 650 or better, 8GB RAM or more, Android 10 or higher |
| iOS | iPhone 8 or better, iOS 13 or higher | iPhone X or better, iOS 14 or higher |

How to Play Call of Duty Warzone Mobile without Verification?

-

Call of Duty Warzone Mobile requires verification to play, meaning you need to log in with your Activision account or create one if you don't have one already. Verification is necessary to sync your progress across platforms and access social features like friends and chat channels. However, some players may want to play the game without verification for various reasons. In this section, we will discuss the risks and drawbacks of using unofficial APK files from third-party sources, the possible ways to bypass the verification process using VPNs or fake accounts, and the advantages and disadvantages of playing the game without verification.

The risks and drawbacks of using unofficial APK files from third-party sources

-

Some players may be tempted to download Call of Duty Warzone Mobile APK files from third-party sources that claim to offer the game without verification. However, this is a risky and unwise move, as these APK files may contain malware, viruses, or spyware that can harm your device or steal your personal information. Moreover, these APK files may not be updated or compatible with the latest version of the game, resulting in crashes, glitches, or errors. Furthermore, these APK files may violate the terms of service of Activision and Tencent, and you may face legal consequences or get banned from the game if you use them.

-

The possible ways to bypass the verification process using VPNs or fake accounts

-

Another way to play Call of Duty Warzone Mobile without verification is to use VPNs or fake accounts to bypass the verification process. VPNs are virtual private networks that allow you to change your IP address and location, making it seem like you are playing from a different region. Fake accounts are dummy accounts that you create with fake or temporary email addresses and passwords. By using VPNs or fake accounts, you may be able to access the game without logging in with your real Activision account. However, this method is not foolproof, as you may still encounter verification prompts or errors. Moreover, this method may also violate the terms of service of Activision and Tencent, and you may face legal consequences or get banned from the game if you use them.

-

The advantages and disadvantages of playing the game without verification

-

Playing Call of Duty Warzone Mobile without verification may have some advantages and disadvantages. Some of the possible advantages are:

- - You can play the game anonymously and protect your privacy - You can avoid spam or unwanted messages from other players - You can switch between different regions or servers easily

Some of the possible disadvantages are:

- - You may miss out on some features or rewards that require verification - You may lose your progress or data if you delete the game or change your device - You may face technical issues or errors that require verification to fix - You may risk getting banned or sued by Activision and Tencent

Tips and Tricks for Call of Duty Warzone Mobile

-

Call of Duty Warzone Mobile is a fun and challenging game that requires skill, strategy, and teamwork to win. If you want to improve your performance and increase your chances of survival, here are some tips and tricks that you can follow:

-

Choose the best controls, settings, and loadouts for your playstyle

-

One of the first things you should do before playing Call of Duty Warzone Mobile is to customize your controls, settings, and loadouts according to your preference and playstyle. You can choose between simple mode, advanced mode, or custom mode for your controls, depending on how comfortable you are with aiming and shooting. You can also adjust your sensitivity, graphics, sound, and other options in the settings menu. Finally, you can select your loadouts, which are preset combinations of weapons, perks, equipment, and killstreaks that you can use in the game. You can unlock more loadouts as you level up and earn more cash.

-

Communicate with your team, use your mini-map, and don't stand still

-

Call of Duty Warzone Mobile is a team-based game that requires coordination and communication with your teammates. You can use voice chat or text chat to communicate with your team members, share information, plan strategies, and request help. You can also use your mini-map to see where your teammates are, where the enemies are (if they fire their weapons), where the gas circle is moving, where the contracts are located, and where the buy stations are located. Moreover, you should never stand still in the game, as that makes you an easy target for snipers or enemies. Always keep moving, crouching, jumping, sliding, or using vehicles to avoid getting shot.

-

Use cover, aim down sights, and switch to your pistol when needed

-

When engaging in combat with enemies in Call of Duty Warzone Mobile, you should always use cover to protect yourself from incoming fire. Cover can be anything from walls, buildings, trees, rocks, vehicles, or crates. You can also use smoke grenades or flashbangs to create temporary cover or blind your enemies. When shooting at enemies, you should always aim down sights (ADS) to improve your accuracy and damage. You can also use different scopes or attachments to enhance your aiming. However, if you run out of ammo or need to reload, you should switch to your pistol instead of wasting time. Your pistol can be a lifesaver in close-range situations, as it has a faster fire rate and reload speed than most weapons.

-

Log in daily, join a clan, and participate in Clan Wars for rewards

-

Call of Duty Warzone Mobile offers a lot of rewards and incentives for players who log in daily, join a clan, and participate in Clan Wars. By logging in daily, you can earn free items such as cash, weapon skins, crates, or even a new map. By joining a clan, you can make friends with other players, chat with them, and play with them. You can also earn clan points by completing clan missions or playing clan matches. By participating in Clan Wars, you can compete with other clans for glory and prizes. Clan Wars are seasonal events that last for a few weeks, where clans are divided into groups and fight for territory on a map. The more territory you control, the more rewards you get.

-

Conclusion

-

Call of Duty Warzone Mobile is an exciting and immersive mobile battle royale game that brings the authentic COD experience to your fingertips. The game is expected to launch worldwide in Fall 2023, but you can pre-register or try the early access version now. The game requires verification to play, but there are some ways to avoid it at your own risk. The game offers a lot of fun and challenge, but you can improve your skills with some tips and tricks. If you are looking for a new mobile game to play, Call of Duty Warzone Mobile is definitely worth checking out.

-

FAQs

-

Is Call of Duty Warzone Mobile free to play?

-

Yes, Call of Duty Warzone Mobile is free to play and does not require any purchase or subscription to download or play. However, the game may offer optional in-app purchases or ads that can enhance your gameplay or support the developers.

-

Is Call of Duty Warzone Mobile cross-platform?

-

Yes, Call of Duty Warzone Mobile supports cross-platform play with Call of Duty Modern Warfare II and Call of Duty Warzone on PC and consoles. This means you can play with or against players on different devices and platforms. You can also sync your progress and Battle Pass across platforms using your Activision account.

-

How do I update Call of Duty Warzone Mobile?

-

To update Call of Duty Warzone Mobile, you need to go to Google Play or App Store and check for updates. If there is an update available, you need to download and install it before you can play the game. You may also need to restart your device after updating the game.

-

How do I report a bug or a hacker in Call of Duty Warzone Mobile?

-

To report a bug or a hacker in Call of Duty Warzone Mobile, you need to go to the settings menu and tap on the feedback button. You can then choose the type of feedback you want to send, such as bug report, hacker report, suggestion, or praise. You can also attach screenshots or videos to support your feedback. You will then receive a confirmation message that your feedback has been sent.

-

How do I contact customer support for Call of Duty Warzone Mobile?

-

To contact customer support for Call of Duty Warzone Mobile, you need to go to the settings menu and tap on the help button. You can then choose the topic that relates to your issue, such as account, gameplay, purchase, or technical. You can then browse through the FAQs or contact the support team via email or chat.

-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Facebook APK 9.0 - The Best Way to Connect with Friends and Family.md b/spaces/1phancelerku/anime-remove-background/Facebook APK 9.0 - The Best Way to Connect with Friends and Family.md deleted file mode 100644 index 48986d4c706a236dc2bb501ba450c91eca3aaa59..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Facebook APK 9.0 - The Best Way to Connect with Friends and Family.md +++ /dev/null @@ -1,133 +0,0 @@ -
-

Facebook APK 9.0: What You Need to Know

-

Facebook is one of the most popular social media platforms in the world, with over 2 billion monthly active users. It allows you to connect with your friends and family, discover new things, and communicate with ease. But did you know that there is a way to enjoy Facebook even more on your Android device? It's called Facebook APK 9.0, and it's a modified version of the official Facebook app that offers faster performance, access to beta features, and no need to update manually.

-




-

In this article, we will tell you everything you need to know about Facebook APK 9.0, including how to download and install it on your Android device, what features it offers, what are its pros and cons, and whether you should give it a try or not.

-

Introduction

-

What is Facebook APK 9.0 and why you should download it

-

Facebook APK 9.0 is an Android application package (APK) file that contains a modified version of the official Facebook app for Android devices. An APK file is a compressed file that contains all the code, resources, and assets needed to run an app on an Android device.

-

Facebook APK 9.0 is not available on the Google Play Store, which means you have to download it from a third-party source such as APKCombo or Uptodown. You can think of it as a beta version of the official app, which means you can enjoy features and updates that are not yet available to the public, including faster performance, early access to beta features, and no need to update the app manually.

- -

By downloading Facebook APK 9.0, you can enjoy a better Facebook experience on your Android device, without having to wait for the official app to update.

-

How to download and install Facebook APK 9.0 on your Android device

-

Downloading and installing Facebook APK 9.0 on your Android device is straightforward, but you need to follow the steps carefully to avoid any problems. Here are the steps you need to follow:

-

-
    -
  1. First, uninstall the official Facebook app from your device if you have it installed. You can do this by going to Settings > Apps > Facebook > Uninstall.
  2. Next, enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources > Toggle On.
  3. Then, download the Facebook APK 9.0 file from a trusted source such as APKCombo or Uptodown. You can use your browser or a file manager app to do this.
  4. After downloading the file, locate it on your device and tap on it to start the installation process. You may see a warning message asking you to confirm the installation. Tap on Install and wait for the process to finish.
  5. Finally, launch the app and log in with your Facebook account. You may see a message asking you to allow some permissions for the app. Tap on Allow and enjoy Facebook APK 9.0 on your device.
-

Note: If you encounter any issues or errors during the installation process, you may need to clear the cache and data of the app by going to Settings > Apps > Facebook APK 9.0 > Storage > Clear Cache and Clear Data.

-

Features of Facebook APK 9.0

-

Connect with Friends and Family

-

One of the main features of Facebook APK 9.0 is that it allows you to connect with your friends and family in various ways.

- -

Facebook APK 9.0 also lets you see what your friends are up to by showing you their posts in your news feed. You can like, comment, or react to their posts, or share them with others. You can also create stories that disappear after 24 hours, or watch stories from your friends and pages you follow.

-

Discover New Things

-

Another feature of Facebook APK 9.0 is that it helps you discover new things that match your interests or curiosity.

- -

Facebook APK 9.0 also gives you access to a variety of games, apps, and services that you can use for fun or convenience. You can play games with your friends or other people online, use apps that enhance your productivity or creativity, or use services that offer shopping, dating, or travel options.

-

Communicate with Ease

-

The last feature of Facebook APK 9.0 is that it lets you communicate easily with anyone on Facebook.

- -

Facebook APK 9.0 also allows you to communicate with people who are not on Facebook, by using their phone numbers or email addresses. You can also sync your contacts with the app, so you can see who is on Facebook and who is not.

-

Pros and Cons of Facebook APK 9.0

-

Pros

-

As you can see, Facebook APK 9.0 offers many benefits that make it a great alternative to the official Facebook app. Some of the pros of using Facebook APK 9.0 are:
- Faster performance than the official app
- Access to beta features and updates before they are released to the public
- No need to update the app manually
- The same core features for connecting, discovering, and communicating

- -

Cons

-

However, Facebook APK 9.0 also has some drawbacks that you should be aware of before downloading it. Some of the cons of using Facebook APK 9.0 are:
- It is a beta version, so it may contain bugs and glitches
- It may not be compatible with your device or Android version
- It may pose security risks if downloaded from untrusted sources

- -

Conclusion

-

In conclusion, Facebook APK 9.0 is a modified version of the official Facebook app that offers many advantages over the original app. It allows you to connect with your friends and family, discover new things, and communicate with ease on your Android device. It also gives you faster performance, access to beta features, and no need to update manually.

-

However, Facebook APK 9.0 also has some disadvantages that you should consider before downloading it. It may contain bugs and glitches, may not be compatible with your device or Android version, and may pose security risks if downloaded from untrusted sources.

-

Therefore, we recommend that you download Facebook APK 9.0 only from a trusted source (such as APKCombo or Uptodown), and only if you are willing to take the risk of using a beta version of the app. Otherwise, you may want to stick with the official Facebook app for a more stable and secure experience.

-

Frequently Asked Questions (FAQs)

-

Q: What is the difference between Facebook APK 9.0 and Facebook Lite?

-

A: Facebook Lite is another version of the official Facebook app, designed for low-end devices and slow internet connections. It is smaller, consumes less data, and runs faster than the regular app, but it also has fewer features and functions. Facebook APK 9.0, by contrast, is a modified version of the regular app that offers more features and functions, but also requires more data and device resources.

-

Q: Is Facebook APK 9.0 safe to use?

-

A: Facebook APK 9.0 is safe to use as long as you download it from a trusted source (such as APKCombo or Uptodown). However, since it is a beta version of the app, it may contain bugs and glitches that affect the user experience or security. Therefore, you should always be careful when using Facebook APK 9.0 and avoid sharing sensitive information or clicking on suspicious links.

-

Q: How do I update Facebook APK 9.0?

-

A: Unlike the official Facebook app, which updates automatically through the Google Play Store, Facebook APK 9.0 does not update automatically. You have to download the latest version of the app from a trusted source (such as APKCombo or Uptodown) and install it over the existing app.

-

Q: Can I use both Facebook APK 9.0 and the official Facebook app on my device?

-

A: No, you cannot use both Facebook APK 9.0 and the official Facebook app on your device at the same time. You have to uninstall one of them before installing the other. This is because they have the same package name and signature, which means they cannot coexist on the same device.

-

Q: What are some alternatives to Facebook APK 9.0?

-

A: If you are looking for alternatives to Facebook APK 9.0, you may want to try Facebook Lite, the official lightweight version of the app discussed above, or simply the regular Facebook app from the Google Play Store.

-

-
-
\ No newline at end of file diff --git a/spaces/4Taps/SadTalker/src/facerender/modules/discriminator.py b/spaces/4Taps/SadTalker/src/facerender/modules/discriminator.py deleted file mode 100644 index d4459b07cb075c9f9d345f9b3dffc02cd859313b..0000000000000000000000000000000000000000 --- a/spaces/4Taps/SadTalker/src/facerender/modules/discriminator.py +++ /dev/null @@ -1,90 +0,0 @@ -from torch import nn -import torch.nn.functional as F -from facerender.modules.util import kp2gaussian -import torch - - -class DownBlock2d(nn.Module): - """ - Simple block for processing video (encoder). - """ - - def __init__(self, in_features, out_features, norm=False, kernel_size=4, pool=False, sn=False): - super(DownBlock2d, self).__init__() - self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size) - - if sn: - self.conv = nn.utils.spectral_norm(self.conv) - - if norm: - self.norm = nn.InstanceNorm2d(out_features, affine=True) - else: - self.norm = None - self.pool = pool - - def forward(self, x): - out = x - out = self.conv(out) - if self.norm: - out = self.norm(out) - out = F.leaky_relu(out, 0.2) - if self.pool: - out = F.avg_pool2d(out, (2, 2)) - return out - - -class Discriminator(nn.Module): - """ - Discriminator similar to Pix2Pix - """ - - def __init__(self, num_channels=3, block_expansion=64, num_blocks=4, max_features=512, - sn=False, **kwargs): - super(Discriminator, self).__init__() - - down_blocks = [] - for i in range(num_blocks): - down_blocks.append( - DownBlock2d(num_channels if i == 0 else min(max_features, block_expansion * (2 ** i)), - min(max_features, block_expansion * (2 ** (i + 1))), - norm=(i != 0), kernel_size=4, pool=(i != num_blocks - 1), sn=sn)) - - self.down_blocks = nn.ModuleList(down_blocks) - self.conv = nn.Conv2d(self.down_blocks[-1].conv.out_channels, out_channels=1, kernel_size=1) - if sn: - self.conv = nn.utils.spectral_norm(self.conv) - - def forward(self, x): - feature_maps = [] - out = x - - for down_block in self.down_blocks: - feature_maps.append(down_block(out)) - out = feature_maps[-1] - prediction_map = self.conv(out) - - return feature_maps, prediction_map - - -class MultiScaleDiscriminator(nn.Module): - """ - Multi-scale (scale) discriminator - """ - - def __init__(self, scales=(), **kwargs): - super(MultiScaleDiscriminator, self).__init__() - self.scales = scales - discs = {} - for scale in scales: - discs[str(scale).replace('.', '-')] = Discriminator(**kwargs) - self.discs = nn.ModuleDict(discs) - - def forward(self, x): - out_dict = {} - for scale, disc in self.discs.items(): - scale = str(scale).replace('-', '.') - key = 'prediction_' + scale - feature_maps, prediction_map = disc(x[key]) - out_dict['feature_maps_' + scale] = feature_maps - out_dict['prediction_map_' + scale] = prediction_map - return out_dict diff --git a/spaces/801artistry/RVC801/infer/lib/train/mel_processing.py b/spaces/801artistry/RVC801/infer/lib/train/mel_processing.py deleted file mode 100644 index f458775bf62b79f791b419ca7ed62c550ae252d5..0000000000000000000000000000000000000000 --- a/spaces/801artistry/RVC801/infer/lib/train/mel_processing.py +++ /dev/null @@ -1,132 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn -import logging - -logger = logging.getLogger(__name__) - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def 
dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - return dynamic_range_compression_torch(magnitudes) - - -def spectral_de_normalize_torch(magnitudes): - return dynamic_range_decompression_torch(magnitudes) - - -# Reusable banks -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - """Convert waveform into Linear-frequency Linear-amplitude spectrogram. - - Args: - y :: (B, T) - Audio waveforms - n_fft - sampling_rate - hop_size - win_size - center - Returns: - :: (B, Freq, Frame) - Linear-frequency Linear-amplitude spectrogram - """ - # Validation - if torch.min(y) < -1.07: - logger.debug("min value is %s", str(torch.min(y))) - if torch.max(y) > 1.07: - logger.debug("max value is %s", str(torch.max(y))) - - # Window - Cache if needed - global hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to( - dtype=y.dtype, device=y.device - ) - - # Padding - y = torch.nn.functional.pad( - y.unsqueeze(1), - (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), - mode="reflect", - ) - y = y.squeeze(1) - - # Complex Spectrogram :: (B, T) -> (B, Freq, Frame, RealComplex=2) - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - # Linear-frequency Linear-amplitude spectrogram :: (B, Freq, Frame, RealComplex=2) -> (B, Freq, Frame) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - # MelBasis - Cache if needed - global mel_basis - dtype_device = str(spec.dtype) + "_" + str(spec.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn( - sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax - ) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to( - dtype=spec.dtype, device=spec.device - ) - - # Mel-frequency Log-amplitude spectrogram :: (B, Freq=num_mels, Frame) - melspec = torch.matmul(mel_basis[fmax_dtype_device], spec) - melspec = spectral_normalize_torch(melspec) - return melspec - - -def mel_spectrogram_torch( - y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False -): - """Convert waveform into Mel-frequency Log-amplitude spectrogram. 
- - Args: - y :: (B, T) - Waveforms - Returns: - melspec :: (B, Freq, Frame) - Mel-frequency Log-amplitude spectrogram - """ - # Linear-frequency Linear-amplitude spectrogram :: (B, T) -> (B, Freq, Frame) - spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) - - # Mel-frequency Log-amplitude spectrogram :: (B, Freq, Frame) -> (B, Freq=num_mels, Frame) - melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) - - return melspec diff --git a/spaces/A00001/bingothoo/src/components/chat-list.tsx b/spaces/A00001/bingothoo/src/components/chat-list.tsx deleted file mode 100644 index 624a78ef0d7be0f1192cf02a81e2e9cf214cb193..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/chat-list.tsx +++ /dev/null @@ -1,28 +0,0 @@ -import React from 'react' - -import { Separator } from '@/components/ui/separator' -import { ChatMessage } from '@/components/chat-message' -import { ChatMessageModel } from '@/lib/bots/bing/types' - -export interface ChatList { - messages: ChatMessageModel[] -} - -export function ChatList({ messages }: ChatList) { - if (!messages.length) { - return null - } - - return ( -
- {messages.map((message, index) => ( - - - {index < messages.length - 1 && ( - - )} - - ))} -
- ) -} diff --git a/spaces/AI4PD/hexviz/hexviz/view.py b/spaces/AI4PD/hexviz/hexviz/view.py deleted file mode 100644 index 2d2611da4f32be617f5a9a135d8a812454c975b1..0000000000000000000000000000000000000000 --- a/spaces/AI4PD/hexviz/hexviz/view.py +++ /dev/null @@ -1,154 +0,0 @@ -from io import StringIO - -import streamlit as st -from Bio.PDB import PDBParser - -from hexviz.attention import get_pdb_file, get_pdb_from_seq - -menu_items = { - "Get Help": "https://huggingface.co/spaces/aksell/hexviz/discussions/new", - "Report a bug": "https://huggingface.co/spaces/aksell/hexviz/discussions/new", - "About": "Created by [Aksel Lenes](https://github.com/aksell/) from Noelia Ferruz's group at the Institute of Molecular Biology of Barcelona. Read more at https://www.aiproteindesign.com/", -} - - -def get_selecte_model_index(models): - selected_model_name = st.session_state.get("selected_model_name", None) - if selected_model_name is None: - return 0 - else: - return next( - (i for i, model in enumerate(models) if model.name.value == selected_model_name), - None, - ) - - -def clear_model_state(): - if "plot_heads" in st.session_state: - del st.session_state.plot_heads - if "plot_layers" in st.session_state: - del st.session_state.plot_layers - if "selected_head" in st.session_state: - del st.session_state.selected_head - if "selected_layer" in st.session_state: - del st.session_state.selected_layer - if "plot_layers" in st.session_state: - del st.session_state.plot_layers - if "plot_heads" in st.session_state: - del st.session_state.plot_heads - if "label_tokens" in st.session_state: - del st.session_state.label_tokens - - -def select_model(models): - if "selected_model_name" not in st.session_state: - st.session_state.selected_model_name = models[0].name.value - selected_model_name = st.selectbox( - "Select model", - [model.name.value for model in models], - key="selected_model_name", - on_change=clear_model_state, - ) - select_model = next( - (model for model in models if model.name.value == selected_model_name), None - ) - return select_model - - -def clear_pdb_state(): - if "selected_chains" in st.session_state: - del st.session_state.selected_chains - if "selected_chain" in st.session_state: - del st.session_state.selected_chain - if "sequence_slice" in st.session_state: - del st.session_state.sequence_slice - if "uploaded_pdb_str" in st.session_state: - del st.session_state.uploaded_pdb_str - - -def select_pdb(): - if "pdb_id" not in st.session_state: - st.session_state.pdb_id = "2FZ5" - pdb_id = st.text_input(label="1.PDB ID", key="pdb_id", on_change=clear_pdb_state) - return pdb_id - - -def select_protein(pdb_code, uploaded_file, input_sequence): - # We get the pdb from 1 of 3 places: - # 1. Cached pdb from session storage - # 2. PDB file from uploaded file - # 3. 
PDB file fetched based on the pdb_code input - parser = PDBParser() - if uploaded_file is not None: - pdb_str = uploaded_file.read().decode("utf-8") - st.session_state["uploaded_pdb_str"] = pdb_str - source = f"uploaded pdb file {uploaded_file.name}" - structure = parser.get_structure("Userfile", StringIO(pdb_str)) - elif input_sequence: - pdb_str = get_pdb_from_seq(str(input_sequence)) - if not pdb_str: - st.error("ESMfold error, unable to fold sequence") - return None, None, None - else: - structure = parser.get_structure("ESMFold", StringIO(pdb_str)) - if "selected_chains" in st.session_state: - del st.session_state.selected_chains - source = "Input sequence + ESM-fold" - elif "uploaded_pdb_str" in st.session_state: - pdb_str = st.session_state.uploaded_pdb_str - source = "Uploaded file stored in cache" - structure = parser.get_structure("userfile", StringIO(pdb_str)) - else: - file = get_pdb_file(pdb_code) - pdb_str = file.read() - source = f"PDB ID: {pdb_code}" - structure = parser.get_structure(pdb_code, StringIO(pdb_str)) - - return pdb_str, structure, source - - -def select_heads_and_layers(sidebar, model): - sidebar.markdown( - """ - Select Heads and Layers - --- - """ - ) - if "plot_heads" not in st.session_state: - st.session_state.plot_heads = (1, model.heads // 2) - head_range = sidebar.slider( - "Heads to plot", min_value=1, max_value=model.heads, key="plot_heads", step=1 - ) - if "plot_layers" not in st.session_state: - st.session_state.plot_layers = (1, model.layers // 2) - layer_range = sidebar.slider( - "Layers to plot", min_value=1, max_value=model.layers, key="plot_layers", step=1 - ) - - if "plot_step_size" not in st.session_state: - st.session_state.plot_step_size = 1 - step_size = sidebar.number_input( - "Optional step size to skip heads and layers", - key="plot_step_size", - min_value=1, - max_value=model.layers, - ) - layer_sequence = list(range(layer_range[0] - 1, layer_range[1], step_size)) - head_sequence = list(range(head_range[0] - 1, head_range[1], step_size)) - - return layer_sequence, head_sequence - - -def select_sequence_slice(sequence_length): - st.sidebar.markdown( - """ - Sequence segment to plot - --- - """ - ) - if "sequence_slice" not in st.session_state: - st.session_state.sequence_slice = (1, min(50, sequence_length)) - slice = st.sidebar.slider( - "Sequence", key="sequence_slice", min_value=1, max_value=sequence_length, step=1 - ) - return slice diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/fvae.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/fvae.py deleted file mode 100644 index 9659a4f2abb9fd2ef887a432b7ad55b0129a1c6f..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/portaspeech/fvae.py +++ /dev/null @@ -1,202 +0,0 @@ -import numpy as np -import torch -import torch.distributions as dist -from torch import nn - -from modules.commons.conv import ConditionalConvBlocks -from modules.commons.normalizing_flow.res_flow import ResFlow -from modules.commons.wavenet import WN - - -class FVAEEncoder(nn.Module): - def __init__(self, c_in, hidden_size, c_latent, kernel_size, - n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'): - super().__init__() - self.strides = strides - self.hidden_size = hidden_size - if np.prod(strides) == 1: - self.pre_net = nn.Conv1d(c_in, hidden_size, kernel_size=1) - else: - self.pre_net = nn.Sequential(*[ - nn.Conv1d(c_in, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2) - if i == 0 else - nn.Conv1d(hidden_size, hidden_size, 
kernel_size=s * 2, stride=s, padding=s // 2) - for i, s in enumerate(strides) - ]) - if nn_type == 'wn': - self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout) - elif nn_type == 'conv': - self.nn = ConditionalConvBlocks( - hidden_size, c_cond, hidden_size, None, kernel_size, - layers_in_block=2, is_BTC=False, num_layers=n_layers) - - self.out_proj = nn.Conv1d(hidden_size, c_latent * 2, 1) - self.latent_channels = c_latent - - def forward(self, x, nonpadding, cond): - x = self.pre_net(x) - nonpadding = nonpadding[:, :, ::np.prod(self.strides)][:, :, :x.shape[-1]] - x = x * nonpadding - x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding - x = self.out_proj(x) - m, logs = torch.split(x, self.latent_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) - return z, m, logs, nonpadding - - -class FVAEDecoder(nn.Module): - def __init__(self, c_latent, hidden_size, out_channels, kernel_size, - n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'): - super().__init__() - self.strides = strides - self.hidden_size = hidden_size - self.pre_net = nn.Sequential(*[ - nn.ConvTranspose1d(c_latent, hidden_size, kernel_size=s, stride=s) - if i == 0 else - nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=s, stride=s) - for i, s in enumerate(strides) - ]) - if nn_type == 'wn': - self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout) - elif nn_type == 'conv': - self.nn = ConditionalConvBlocks( - hidden_size, c_cond, hidden_size, [1] * n_layers, kernel_size, - layers_in_block=2, is_BTC=False) - self.out_proj = nn.Conv1d(hidden_size, out_channels, 1) - - def forward(self, x, nonpadding, cond): - x = self.pre_net(x) - x = x * nonpadding - x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding - x = self.out_proj(x) - return x - - -class FVAE(nn.Module): - def __init__(self, - c_in_out, hidden_size, c_latent, - kernel_size, enc_n_layers, dec_n_layers, c_cond, strides, - use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None, - encoder_type='wn', decoder_type='wn'): - super(FVAE, self).__init__() - self.strides = strides - self.hidden_size = hidden_size - self.latent_size = c_latent - self.use_prior_flow = use_prior_flow - if np.prod(strides) == 1: - self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1) - else: - self.g_pre_net = nn.Sequential(*[ - nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2) - for i, s in enumerate(strides) - ]) - self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size, - enc_n_layers, c_cond, strides=strides, nn_type=encoder_type) - if use_prior_flow: - self.prior_flow = ResFlow( - c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond) - self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size, - dec_n_layers, c_cond, strides=strides, nn_type=decoder_type) - self.prior_dist = dist.Normal(0, 1) - - def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, **kwargs): - """ - - :param x: [B, C_in_out, T] - :param nonpadding: [B, 1, T] - :param cond: [B, C_g, T] - :return: - """ - if nonpadding is None: - nonpadding = 1 - cond_sqz = self.g_pre_net(cond) - if not infer: - z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz) - q_dist = dist.Normal(m_q, logs_q.exp()) - if self.use_prior_flow: - logqx = q_dist.log_prob(z_q) - z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz) - logpx = self.prior_dist.log_prob(z_p) - loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / 
nonpadding_sqz.sum() / logqx.shape[1] - else: - loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist) - loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1] - z_p = None - return z_q, loss_kl, z_p, m_q, logs_q - else: - latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]] - z_p = torch.randn(latent_shape).to(cond.device) * noise_scale - if self.use_prior_flow: - z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True) - return z_p - - -class SyntaFVAE(nn.Module): - def __init__(self, - c_in_out, hidden_size, c_latent, - kernel_size, enc_n_layers, dec_n_layers, c_cond, strides, - use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None, - encoder_type='wn', decoder_type='wn'): - super(SyntaFVAE, self).__init__() - self.strides = strides - self.hidden_size = hidden_size - self.latent_size = c_latent - self.use_prior_flow = use_prior_flow - if np.prod(strides) == 1: - self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1) - else: - self.g_pre_net = nn.Sequential(*[ - nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2) - for i, s in enumerate(strides) - ]) - self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size, - enc_n_layers, c_cond, strides=strides, nn_type=encoder_type) - if use_prior_flow: - self.prior_flow = ResFlow( - c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond) - self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size, - dec_n_layers, c_cond, strides=strides, nn_type=decoder_type) - self.prior_dist = dist.Normal(0, 1) - self.graph_encoder = GraphAuxEnc(in_dim=hidden_size, hid_dim=hidden_size,out_dim=hidden_size) - - def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, - mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None): - """ - - :param x: target mel, [B, C_in_out, T] - :param nonpadding: [B, 1, T] - :param cond: phoneme encoding, [B, C_g, T] - :return: - """ - word_len = ph2word.max(dim=1)[0] - ph_encoding_for_graph = cond.detach() + 0.1 * (cond - cond.detach()) # only 0.1x grad can pass through - _, ph_out_word_encoding_for_graph = GraphAuxEnc.ph_encoding_to_word_encoding(ph_encoding_for_graph.transpose(1,2), mel2word, word_len) - t_m = mel2word.shape[-1] - g_graph = self.graph_encoder.word_forward(graph_lst=graph_lst, word_encoding=ph_out_word_encoding_for_graph, etypes_lst=etypes_lst) - g_graph = g_graph.transpose(1,2) - g_graph = GraphAuxEnc._postprocess_word2ph(g_graph,mel2word,t_m) - g_graph = g_graph.transpose(1,2) - cond = cond + g_graph * 1. 
- - if nonpadding is None: - nonpadding = 1 - cond_sqz = self.g_pre_net(cond) - if not infer: - z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz) - q_dist = dist.Normal(m_q, logs_q.exp()) - if self.use_prior_flow: - logqx = q_dist.log_prob(z_q) - z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz) - logpx = self.prior_dist.log_prob(z_p) - loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1] - else: - loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist) - loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1] - z_p = None - return z_q, loss_kl, z_p, m_q, logs_q - else: - latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]] - z_p = torch.randn(latent_shape).to(cond.device) * noise_scale - if self.use_prior_flow: - z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True) - return z_p \ No newline at end of file diff --git a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntaspeech.py b/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntaspeech.py deleted file mode 100644 index c74b6a53765026919ebd3e583861051441c508da..0000000000000000000000000000000000000000 --- a/spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/syntaspeech/syntaspeech.py +++ /dev/null @@ -1,274 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import Linear -from utils.hparams import hparams -from modules.commons.conv import ConvBlocks, ConditionalConvBlocks -from modules.commons.common_layers import Embedding -from modules.commons.rel_transformer import RelTransformerEncoder -from modules.commons.transformer import MultiheadAttention, FFTBlocks -from modules.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word -from modules.tts.fastspeech import FS_DECODERS, FastSpeech -from modules.portaspeech.fvae import SyntaFVAE, FVAE -from utils.nn.seq_utils import group_hidden_by_segs -from modules.fastspeech.tts_modules import SyntaDurationPredictor - - -class SinusoidalPosEmb(nn.Module): - def __init__(self, dim): - super().__init__() - self.dim = dim - def forward(self, x): - """ - - :param x: [B, T] - :return: [B, T, H] - """ - device = x.device - half_dim = self.dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, device=device) * -emb) - emb = x[:, :, None] * emb[None, :] - emb = torch.cat((emb.sin(), emb.cos()), dim=-1) - return emb - - -class SyntaSpeech(FastSpeech): - def __init__(self, ph_dict_size, word_dict_size, out_dims=None): - super().__init__(ph_dict_size, out_dims) - # build linguistic encoder - if hparams['num_spk'] > 1: - self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size) - if hparams['use_word_encoder']: - self.word_encoder = RelTransformerEncoder( - word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2, - hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) - if hparams['dur_level'] == 'word': - if hparams['word_encoder_type'] == 'rel_fft': - self.ph2word_encoder = RelTransformerEncoder( - 0, self.hidden_size, self.hidden_size, self.hidden_size, 2, - hparams['word_enc_layers'], hparams['enc_ffn_kernel_size']) - if hparams['word_encoder_type'] == 'fft': - self.ph2word_encoder = FFTBlocks( - self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads']) - self.sin_pos = SinusoidalPosEmb(self.hidden_size) - self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) - self.dec_query_proj = nn.Linear(2 
* self.hidden_size, self.hidden_size) - self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size) - self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False) - self.attn.enable_torch_version = False - if hparams['text_encoder_postnet']: - self.text_encoder_postnet = ConvBlocks( - self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2) - else: - self.sin_pos = SinusoidalPosEmb(self.hidden_size) - - predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size - self.dur_predictor = SyntaDurationPredictor( - self.hidden_size, - n_chans=predictor_hidden, - n_layers=hparams['dur_predictor_layers'], - dropout_rate=hparams['predictor_dropout'], - kernel_size=hparams['dur_predictor_kernel']) - # build VAE decoder - if hparams['use_fvae']: - del self.decoder - del self.mel_out - if hparams.get("use_gae_in_prior", True): - self.fvae = SyntaFVAE( - c_in_out=self.out_dims, - hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], - kernel_size=hparams['fvae_kernel_size'], - enc_n_layers=hparams['fvae_enc_n_layers'], - dec_n_layers=hparams['fvae_dec_n_layers'], - c_cond=self.hidden_size, - use_prior_flow=hparams['use_prior_flow'], - flow_hidden=hparams['prior_flow_hidden'], - flow_kernel_size=hparams['prior_flow_kernel_size'], - flow_n_steps=hparams['prior_flow_n_blocks'], - strides=[hparams['fvae_strides']], - encoder_type=hparams['fvae_encoder_type'], - decoder_type=hparams['fvae_decoder_type'], - ) - else: - self.fvae = FVAE( - c_in_out=self.out_dims, - hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'], - kernel_size=hparams['fvae_kernel_size'], - enc_n_layers=hparams['fvae_enc_n_layers'], - dec_n_layers=hparams['fvae_dec_n_layers'], - c_cond=self.hidden_size, - use_prior_flow=hparams['use_prior_flow'], - flow_hidden=hparams['prior_flow_hidden'], - flow_kernel_size=hparams['prior_flow_kernel_size'], - flow_n_steps=hparams['prior_flow_n_blocks'], - strides=[hparams['fvae_strides']], - encoder_type=hparams['fvae_encoder_type'], - decoder_type=hparams['fvae_decoder_type'], - ) - else: - self.decoder = FS_DECODERS[hparams['decoder_type']](hparams) - self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True) - if hparams['use_pitch_embed']: - self.pitch_embed = Embedding(300, self.hidden_size, 0) - if hparams['add_word_pos']: - self.word_pos_proj = Linear(self.hidden_size, self.hidden_size) - - def build_embedding(self, dictionary, embed_dim): - num_embeddings = len(dictionary) - emb = Embedding(num_embeddings, embed_dim, self.padding_idx) - return emb - - def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, - spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, - global_step=None, graph_lst=None, etypes_lst=None, *args, **kwargs): - - if hparams['use_spk_embed']: - spk_embed = spk_embed - elif hparams['use_spk_id']: - spk_embed = self.spk_embed_proj(spk_id)[:, None, :] - else: - spk_embed = 0 - - ret = {} - style_embed = self.forward_style_embed(spk_embed, spk_id) # speaker embedding, [B, 1, C] - x, tgt_nonpadding = self.run_text_encoder( - txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst=graph_lst, etypes_lst=etypes_lst, **kwargs) - x = x + style_embed # it maybe necessary to achieve multi-speaker - x = x * tgt_nonpadding - ret['nonpadding'] = tgt_nonpadding - if hparams['use_pitch_embed']: - x = x + self.pitch_embed(pitch) - ret['decoder_inp'] = x - if 
infer and (mel2ph is None or mel2word is None): - mel2word = ret['mel2word'] - ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step, - mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) - return ret - - def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst, etypes_lst, **kwargs): - word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1 # [B, T_mel, T_word] - src_nonpadding = (txt_tokens > 0).float()[:, :, None] - use_bert = hparams.get("use_bert") is True - if use_bert: - ph_encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word, - graph_lst=graph_lst, etypes_lst=etypes_lst, - cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed - else: - ph_encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed - if hparams['use_word_encoder']: - word_encoder_out = self.word_encoder(word_tokens) + style_embed - ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word) - - dur_input = ph_encoder_out * src_nonpadding - if hparams['dur_level'] == 'word': - word_encoder_out = 0 - h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0] - word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word) - if hparams['use_word_encoder']: - word_encoder_out = word_encoder_out + self.word_encoder(word_tokens) - mel2word = self.forward_dur(dur_input, mel2word, ret, ph2word=ph2word, word_len=word_len, graph_lst=graph_lst, etypes_lst=etypes_lst) - mel2word = clip_mel2token_to_multiple(mel2word, hparams['frames_multiple']) - ret['mel2word'] = mel2word - tgt_nonpadding = (mel2word > 0).float()[:, :, None] - enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H] - dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] - dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph] - x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask) - if hparams['add_word_pos']: - x = x + self.word_pos_proj(dec_pos) - ret['attn'] = weight - else: - mel2ph = self.forward_dur(dur_input, mel2ph, ret) - mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple']) - mel2word = mel2ph_to_mel2word(mel2ph, ph2word) - x = expand_states(ph_encoder_out, mel2ph) - if hparams['add_word_pos']: - dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H] - x = x + self.word_pos_proj(dec_pos) - tgt_nonpadding = (mel2ph > 0).float()[:, :, None] - if hparams['use_word_encoder']: - x = x + expand_states(word_encoder_out, mel2word) - return x, tgt_nonpadding - - def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask): - ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1)) - word_enc_out_expend = expand_states(word_encoder_out, mel2word) - word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1) - if hparams['text_encoder_postnet']: - word_enc_out_expend = self.dec_res_proj(word_enc_out_expend) - word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend) - dec_q = x_res = word_enc_out_expend - else: - dec_q = self.dec_query_proj(word_enc_out_expend) - x_res = self.dec_res_proj(word_enc_out_expend) - ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1) - x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9) - x = x.transpose(0, 1) - x = x + x_res - return x, weight - - def 
run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0, - mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None): - if not hparams['use_fvae']: - x = self.decoder(x) - x = self.mel_out(x) - ret['kl'] = 0 - return x * tgt_nonpadding - else: - # x is the phoneme encoding - x = x.transpose(1, 2) # [B, H, T] - tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T] - if infer: - z = self.fvae(cond=x, infer=True, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) - else: - tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T] - z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae( - tgt_mels, tgt_nonpadding_BHT, cond=x, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst) - if global_step < hparams['posterior_start_steps']: - z = torch.randn_like(z) - x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2) - ret['pre_mel_out'] = x_recon - return x_recon - - def forward_dur(self, dur_input, mel2word, ret, **kwargs): - """ - - :param dur_input: [B, T_txt, H] - :param mel2ph: [B, T_mel] - :param txt_tokens: [B, T_txt] - :param ret: - :return: - """ - word_len = kwargs['word_len'] - ph2word = kwargs['ph2word'] - graph_lst = kwargs['graph_lst'] - etypes_lst = kwargs['etypes_lst'] - src_padding = dur_input.data.abs().sum(-1) == 0 - dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach()) - dur = self.dur_predictor(dur_input, src_padding, ph2word, graph_lst, etypes_lst) - - B, T_ph = ph2word.shape - dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur) - dur = dur[:, 1:] - ret['dur'] = dur - if mel2word is None: - mel2word = self.length_regulator(dur).detach() - return mel2word - - def get_pos_embed(self, word2word, x2word): - x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph] - x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1) - x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H] - return x_pos - - def store_inverse_all(self): - def remove_weight_norm(m): - try: - if hasattr(m, 'store_inverse'): - m.store_inverse() - nn.utils.remove_weight_norm(m) - except ValueError: # this module didn't have weight norm - return - - self.apply(remove_weight_norm) diff --git a/spaces/AIWaves/Software_Company/src/agents/Component/ExtraComponent.py b/spaces/AIWaves/Software_Company/src/agents/Component/ExtraComponent.py deleted file mode 100644 index 3ae6d6728434d03e8a7194befe0cc1be14b6653f..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Software_Company/src/agents/Component/ExtraComponent.py +++ /dev/null @@ -1,128 +0,0 @@ -from .ToolComponent import ToolComponent -import json -from utils import flatten_dict,get_embedding,matching_category,search_with_api,limit_keys,limit_values -import os - - -class CategoryRequirementsComponent(ToolComponent): - def __init__(self, information_path): - super().__init__() - self.information_dataset = [] - self.leaf_name = [] - for toy_path in information_path: - with open(toy_path, encoding="utf-8") as json_file: - data = json.load(json_file) - for d in data: - if "/" in d["cat_leaf_name"]: - leaf_names = d["cat_leaf_name"].split("/") + [d["cat_leaf_name"]] - else: - leaf_names = [d["cat_leaf_name"]] - for name in leaf_names: - self.leaf_name.append(name) - new_d = d.copy() - new_d["cat_leaf_name"] = name - new_d["information"] = flatten_dict(new_d["information"]) - self.information_dataset.append(new_d) - - 
self.target_embbeding = get_embedding( - self.leaf_name - ) - - def search_information(self, category, information_dataset): - knowledge = {} - for d in information_dataset: - if category == d["cat_leaf_name"]: - knowledge = d["information"] - knowledge = { - key: value - for key, value in knowledge.items() - if (value and key != "相关分类") - } - break - return knowledge - - def func(self, agent): - prompt = "" - messages = agent.long_term_memory - outputdict = {} - functions = [ - { - "name": "search_information", - "description": "根据用户所需要购买商品的种类跟用户的需求去寻找用户所需要的商品", - "parameters": { - "type": "object", - "properties": { - "category": { - "type": "string", - "description": "用户现在所需要的商品类别,比如纸尿布,笔记本电脑等,注意,只能有一个", - }, - "requirements": { - "type": "string", - "description": "用户现在的需求,比如说便宜,安踏品牌等等,可以有多个需求,中间以“ ”分隔", - }, - }, - "required": ["category", "requirements"], - }, - } - ] - - response = agent.LLM.get_response( - messages, - None, - None, - functions=functions, - stream=False, - function_call={"name": "search_information"}, - ) - response_message = json.loads(response["function_call"]["arguments"]) - category = ( - response_message["category"] if response_message["category"] else None - ) - requirements = ( - response_message["requirements"] - if response_message["requirements"] - else category - ) - if not (category or requirements): - return {} - - topk_result = matching_category( - category, self.leaf_name, None, self.target_embbeding, top_k=3 - ) - - top1_score = topk_result[1][0] - request_items, top_category = search_with_api(requirements, category) - - - MIN_CATEGORY_SIM = eval(os.environ["MIN_CATEGORY_SIM"] - ) if "MIN_CATEGORY_SIM" in os.environ else 0.7 - - if top1_score > MIN_CATEGORY_SIM: - agent.environment.shared_memory["category"] = topk_result[0][0] - category = topk_result[0][0] - information = self.search_information( - topk_result[0][0], self.information_dataset - ) - information = limit_keys(information, 3) - information = limit_values(information, 2) - prompt += f"""你需要知道的是:用户目前选择的商品是{category},该商品信息为{information}。你需要根据这些商品信息来详细介绍商品,比如详细介绍商品有哪些品牌,有哪些分类等等,并且询问用户是否有更多的需求。""" - if category in top_category: - top_category.remove(category) - - recommend = "\n经过搜索后,推荐商品如下:\n" - prompt += "筛选出的商品如下:\n" - - for i, request_item in enumerate(request_items): - - itemTitle = request_item["itemTitle"] - itemPrice = request_item["itemPrice"] - itemPicUrl = request_item["itemPicUrl"] - recommend += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]({itemPicUrl})\n" - prompt += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]\n" - outputdict["recommend"] = recommend - print(recommend) - else: - prompt += f"""你需要知道的是:用户目前选择的商品是{category},而我们店里没有这类商品,但是我们店里有一些近似商品,如{top_category},{topk_result[0][0]},你需要对这些近似商品进行介绍,并引导用户购买""" - outputdict["prompt"] = prompt - return outputdict - diff --git a/spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatgptLogin.py b/spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatgptLogin.py deleted file mode 100644 index 9551d15dd5121c4b42f80d0ba547a10f0868563b..0000000000000000000000000000000000000000 --- a/spaces/ASJMO/freegpt/g4f/Provider/Providers/ChatgptLogin.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from ...typing import sha256, Dict, get_type_hints -import requests -import re -import base64 - -url = 'https://chatgptlogin.ac' -model = ['gpt-3.5-turbo'] -supports_stream = False -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - def get_nonce(): - res = 
requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={ - "Referer": "https://chatgptlogin.ac/use-chatgpt-free/", - "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - }) - - src = re.search(r'class="mwai-chat mwai-chatgpt">.*Send

Spearman's ρ

Spearman's rank correlation coefficient (ρ) is a measure of monotonic correlation between two variables, and is therefore better at capturing nonlinear monotonic correlations than Pearson's r. Its value lies between -1 and +1, with -1 indicating total negative monotonic correlation, 0 indicating no monotonic correlation and 1 indicating total positive monotonic correlation.

To calculate ρ for two variables X and Y, one divides the covariance of the rank variables of X and Y by the product of their standard deviations.
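As a rough sketch of that calculation (the arrays below are made-up placeholders, not values from this report), ρ can be reproduced by ranking both variables and applying the Pearson formula to the ranks, then checked against scipy.stats.spearmanr:

```python
import numpy as np
from scipy import stats

# Toy data, purely illustrative.
x = np.array([50.0, 47.0, 76.0, 57.0, 67.0, 56.0, 66.0, 62.0, 65.0, 80.0])
y = np.array([3.1, 2.9, 5.0, 3.4, 4.2, 3.3, 4.1, 3.9, 4.0, 5.6])

# Rank the observations (ties get average ranks), then apply the Pearson formula to the ranks.
rx, ry = stats.rankdata(x), stats.rankdata(y)
rho_manual = np.cov(rx, ry, bias=True)[0, 1] / (rx.std() * ry.std())

rho_scipy, _ = stats.spearmanr(x, y)
print(rho_manual, rho_scipy)  # both values should agree
```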

Pearson's r

Pearson's correlation coefficient (r) is a measure of linear correlation between two variables. Its value lies between -1 and +1, with -1 indicating total negative linear correlation, 0 indicating no linear correlation and 1 indicating total positive linear correlation. Furthermore, r is invariant under separate changes in location and scale of the two variables, so the slope of a linear relationship does not affect r.

To calculate r for two variables X and Y, one divides the covariance of X and Y by the product of their standard deviations.
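A minimal sketch of the same idea, again with placeholder data; the last call also illustrates the location/scale invariance mentioned above:

```python
import numpy as np
from scipy import stats

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
y = np.array([2.1, 3.9, 6.2, 8.1, 9.8, 12.2])

# Covariance of X and Y divided by the product of their standard deviations.
r_manual = np.cov(x, y, bias=True)[0, 1] / (x.std() * y.std())
r_scipy, _ = stats.pearsonr(x, y)

# Shifting and (positively) rescaling X leaves r unchanged.
r_rescaled, _ = stats.pearsonr(3.0 * x + 10.0, y)
print(r_manual, r_scipy, r_rescaled)
```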

Kendall's τ

Similarly to Spearman's rank correlation coefficient, the Kendall rank correlation coefficient (τ) measures ordinal association between two variables. Its value lies between -1 and +1, with -1 indicating total negative correlation, 0 indicating no correlation and 1 indicating total positive correlation.

To calculate τ for two variables X and Y, one determines the number of concordant and discordant pairs of observations. τ is given by the number of concordant pairs minus the number of discordant pairs, divided by the total number of pairs.
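The pair counting can be written out directly; with the tie-free toy data below, this τ-a style count matches scipy.stats.kendalltau (whose default is the tie-adjusted τ-b):

```python
from itertools import combinations

import numpy as np
from scipy import stats

x = np.array([1, 2, 3, 4, 5])
y = np.array([3, 1, 4, 2, 5])

concordant = discordant = 0
for i, j in combinations(range(len(x)), 2):
    s = (x[i] - x[j]) * (y[i] - y[j])
    if s > 0:
        concordant += 1   # the pair is ordered the same way in X and Y
    elif s < 0:
        discordant += 1   # the pair is ordered in opposite ways

n_pairs = len(x) * (len(x) - 1) / 2
tau_manual = (concordant - discordant) / n_pairs

tau_scipy, _ = stats.kendalltau(x, y)
print(tau_manual, tau_scipy)  # 0.4 for this toy example
```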

Cramér's V (φc)

Cramér's V is an association measure for nominal random variables. The coefficient ranges from 0 to 1, with 0 indicating independence and 1 indicating perfect association. The empirical estimators commonly used for Cramér's V have been shown to be biased, even for large samples, so we use the bias-corrected measure proposed by Bergsma (2013).
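One common formulation of that bias correction, sketched here from Bergsma's published formula with placeholder columns rather than this report's actual data, is:

```python
import numpy as np
import pandas as pd
from scipy.stats import chi2_contingency

def cramers_v_corrected(x, y):
    """Bias-corrected Cramér's V for two categorical series (Bergsma, 2013)."""
    table = pd.crosstab(x, y)
    chi2 = chi2_contingency(table)[0]
    n = table.to_numpy().sum()
    r, k = table.shape
    phi2 = chi2 / n
    # Bias correction of phi^2 and of the table dimensions.
    phi2_corr = max(0.0, phi2 - (k - 1) * (r - 1) / (n - 1))
    r_corr = r - (r - 1) ** 2 / (n - 1)
    k_corr = k - (k - 1) ** 2 / (n - 1)
    return np.sqrt(phi2_corr / max(min(k_corr, r_corr) - 1, 1e-12))

# Toy categorical columns, illustrative only.
df = pd.DataFrame({
    "GENDER": ["M", "F", "F", "M", "F", "M", "F", "M"],
    "AMPUTATION": [1, 1, 0, 0, 1, 0, 0, 1],
})
print(cramers_v_corrected(df["GENDER"], df["AMPUTATION"]))
```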

Phik (φk)

Phik (φk) is a new and practical correlation coefficient that works consistently between categorical, ordinal and interval variables, captures non-linear dependency and reverts to the Pearson correlation coefficient in case of a bivariate normal input distribution. There is extensive documentation available here.
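In practice φk is usually computed with the phik package (assumed installed here; consult its documentation if the interface differs), which registers a DataFrame accessor:

```python
import pandas as pd
import phik  # noqa: F401  (importing registers the .phik_matrix accessor)

# Placeholder data in the spirit of the sample below, not the real dataset.
df = pd.DataFrame({
    "AGE": [50, 47, 76, 57, 67, 56, 62, 80],
    "GENDER": ["M", "M", "F", "F", "F", "F", "F", "F"],
    "AMPUTATION": [1, 1, 1, 1, 1, 0, 0, 0],
})

# AGE is treated as an interval variable, the other columns as categorical.
print(df.phik_matrix(interval_cols=["AGE"]))
```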

Missing values

A simple visualization of nullity by column.
The nullity matrix is a data-dense display that lets you quickly pick out patterns in data completion visually.
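A nullity matrix like the one described here can be drawn, for example, with the missingno package (an assumption on our part, since the report itself does not name the library):

```python
import matplotlib.pyplot as plt
import missingno as msno
import numpy as np
import pandas as pd

# Toy frame with a few missing entries, illustrative only.
df = pd.DataFrame({
    "AGE": [50, np.nan, 76, 57],
    "GENDER": ["M", "M", None, "F"],
    "AMPUTATION": [1, 1, 1, np.nan],
})

msno.matrix(df)                    # data-dense display of nullity by column
plt.savefig("nullity_matrix.png")  # or plt.show() in an interactive session
```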

Sample

First rows

      AGE   GENDER   RACE       DIABETES_CLASS    AMPUTATION
0     50    M        Black      Type 2 diabetes   1
1     47    M        Black      Type 2 diabetes   1
2     76    F        Asian      Type 2 diabetes   1
3     57    F        Black      Type 2 diabetes   1
4     67    F        White      Type 2 diabetes   1
5     56    F        White      Type 2 diabetes   1
6     66    F        Asian      Type 2 diabetes   1
7     62    F        Coloured   Type 1 diabetes   1
8     65    F        Black      Type 2 diabetes   1
9     80    F        Asian      Type 1 diabetes   1

Last rows

      AGE   GENDER   RACE       DIABETES_CLASS    AMPUTATION
200   60    M        Coloured   Type 2 diabetes   0
201   69    M        White      Type 2 diabetes   0
202   73    F        Other      Type 2 diabetes   0
203   59    F        Asian      Type 2 diabetes   0
204   75    F        Asian      Type 2 diabetes   0
205   48    F        Coloured   Type 1 diabetes   0
206   50    M        Coloured   Type 2 diabetes   0
207   19    F        White      Type 1 diabetes   0
208   88    F        Black      Type 2 diabetes   0
209   65    F        Other      Type 2 diabetes   0
\ No newline at end of file diff --git a/spaces/Zeebra/chatGPT_whisper_AI_voice_assistant/README.md b/spaces/Zeebra/chatGPT_whisper_AI_voice_assistant/README.md deleted file mode 100644 index 9c7f846687926cb9b1ae96e377f8304d288ff2cb..0000000000000000000000000000000000000000 --- a/spaces/Zeebra/chatGPT_whisper_AI_voice_assistant/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ChatGPT Whisper AI Voice Assistant -emoji: 📉 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.27.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/abascal/chat_with_data_app/utils/app_helper.py b/spaces/abascal/chat_with_data_app/utils/app_helper.py deleted file mode 100644 index 66ffa3f46cb053e6375d8d38882219d9715fb9bb..0000000000000000000000000000000000000000 --- a/spaces/abascal/chat_with_data_app/utils/app_helper.py +++ /dev/null @@ -1,204 +0,0 @@ -import openai -import gradio as gr -from utils.langchain_helper import init_embedding, read_split_doc, create_db_from_documents, init_llm_qa_chain - -# ---------------------------------------------------------------------------- -# Gradio Interface -def chat_to_your_data_ui(openai_api_key, doc_type, doc_path, chunk_size, chunk_overlap, - llm_name, temperature, share_gradio, image_path): - # ---------------------------------------------------------------------------- - # Interface functionality - # ------------------------ - # When set OpenAI API key : read from text box - def read_key_from_textbox(openai_api_key): - try: - from utils.openai_helper import get_completion - openai.api_key = openai_api_key - response = get_completion("test", model='gpt-3.5-turbo') - return "OpenAI API key set!" - except: - return "OpenAI API key not valid!" - - # ------------------------ - # When reading the document - def reading_doc_msg(doc_type, doc_path): - return f"Reading document {doc_path} of type {doc_type} ..." - def read_doc_msg(): - return "Finished reading the document! Let's chat!" - def clear_chatbot_after_read_doc(): - return "", "" - # ------------------------- - # Init the LLM and read document - def init_read_doc(doc_type, doc_path, chunk_size, chunk_overlap, temperature): - global qa_chain - # Init embedding - embedding = init_embedding(openai.api_key) - - # Read and split document using langchain - print(f"Reading document {doc_path} of type {doc_type} ...") - docs_split = read_split_doc(doc_type, doc_path, chunk_size, chunk_overlap) - # ------------------------- - # Create vector database from data - db = create_db_from_documents(docs_split, embedding) - # ------------------------- - # Init the LLM and qa chain - llm, qa_chain, memory = init_llm_qa_chain(llm_name, temperature, openai.api_key, db) - - # When question - def qa_input_msg_history(question, chat_history): - # QA function that inputs the answer and the history. 
- # History managed internally by ChatInterface - answer = qa_chain({"question": question})['answer'] - #response = qa_chain({"question": input}) - chat_history.append((question, answer)) - return "", chat_history - - # When clear all (OpenAI API key, document, chatbot) - def clear_all(): - global qa_chain, db - openai.api_key = None - qa_chain = None - db = None - return "OpenAI API key cleared!", "Document cleared!", "", "", "", "" - - # ---------------------------------------------------------------------------- - # UI - with gr.Blocks(theme=gr.themes.Glass()) as demo: - # Description - gr.Markdown( - """ - # Chat to your data - Ask questions to the chatbot about your document. The chatbot will find the answer to your question. - You can modify the document type and provide its path/link. - You may also modify some of the advanced options. - - """) - # ------------------------- - # OpenAI API key (if not provided) - if openai_api_key is None: - gr.Markdown( - """ - ## Provide OpenAI API key - You need to provide an OpenAI API key to use the chatbot. You can create an account and get a key [here](https://platform.openai.com/docs/api-reference/authentication/). - **Delete the key after using the chatbot !!!** (this will set openai.api_key=None) - """, scale=1 - ) - with gr.Row(): - text_openai_api_key = gr.Textbox(label="OpenAI API key", placeholder="Provide OpenAI API key!", scale=4) - btn_openai_api_key = gr.Button("Set OpenAI API key", scale=1) - text_openai_api_key_output = gr.Textbox(label="Reading state", interactive=False, - placeholder="OpenAI API key not provided!", scale=2) - # ------------------------- - # When set OpenAI API key : read from text box - btn_openai_api_key.click(read_key_from_textbox, - inputs=text_openai_api_key, - outputs=text_openai_api_key_output, - queue=False) - # ------------------------- - # Parameters and chatbot image - with gr.Row(): - with gr.Column(scale=2): - # ------------------------- - # Parameters - # Temperature and document type - gr.Markdown( - """ - ## Select parameters - Default parameters are already provided. - """ - ) - # Advanced parameters (hidden) - with gr.Accordion(label="Advanced options",open=False): - gr.Markdown( - """ - The document is split into chunks, keeping semantically related pieces together and with some overlap. - You can modify the chunk size and overlap. The temperature is used to control the randomness of the output - (the lower the temperature the more deterministic the ouput, the higher its value the more random the result, with $temperature\in[0,1]$). - """ - ) - sl_temperature = gr.Slider(minimum=0.0, maximum=1.0, value=temperature, label="Temperature", - scale=2) - with gr.Row(): - num_chunk_size = gr.Number(value=chunk_size, label="Chunk size", scale=1) - num_chunk_overlap = gr.Number(value=chunk_overlap, label="Chunk overlap", scale=1) - - - # Chatbot image - # https://drive.google.com/file/d/1HDnBsdfUYrCHOFtP2-DqomcmBSs9XyNI/view?usp=sharing - # ![](https://drive.google.com/uc?id=1HDnBsdfUYrCHOFtP2-DqomcmBSs9XyNI) - gr.Markdown( - f""" - drawing - """, scale=1) - - # ------------------------- - # Select and read document - gr.Markdown( - """ - ## Select document - Select the document type and provide its path/link (eg. https://en.wikipedia.org/wiki/Lyon). 
- """) - with gr.Row(): - drop_type = gr.Dropdown(["url", "pdf", "youtube"], - label="Document Type", value=doc_type, min_width=30, scale=1) - text_path = gr.Textbox(label="Document Path/URL", placeholder=doc_path, scale=5) - - with gr.Row(): - # Read document - btn_read = gr.Button("Read document") - text_read_output = gr.Textbox(label="Reading state", interactive=False, placeholder="Select document type and path!") - - # ------------------------- - # Chatbot - gr.Markdown(""" - ## Chatbot - To chat, introduce a question and press enter. - - Question examples: - - - Hi - - - What is the document about? - - - What can visit in Lyon? - """ - ) - # Chatbot - chatbot = gr.Chatbot() - - # Input message - msg = gr.Textbox(label="Question") - - # Clear button - clear = gr.Button("Clear all (API key, document, chatbot))") - - # Init the LLM and read document with default parameters (if API key is provided) - #if openai_api_key is not None: - # init_read_doc(doc_type, doc_path, chunk_size, chunk_overlap, llm_name, temperature, openai_api_key) - # ------------------------- - # When read document (aready read with default parameters) - btn_read.click(reading_doc_msg, # Reading message - inputs=[drop_type, text_path], - outputs=text_read_output).then(init_read_doc, # Init qa chain and read document - inputs=[drop_type, text_path, - num_chunk_size, num_chunk_overlap, - sl_temperature], - queue=False).then(read_doc_msg, # Finished reading message - outputs=text_read_output).then(clear_chatbot_after_read_doc, # Clear chatbot - outputs=[chatbot, msg], queue=False) - # ------------------------- - # When question - msg.submit(qa_input_msg_history, - inputs=[msg, chatbot], - outputs=[msg, chatbot], queue=False)#.then(bot, chatbot, chatbot) - - # When clear - clear.click(clear_all, - outputs=[text_openai_api_key_output, text_read_output, - chatbot, msg, text_openai_api_key, text_path], queue=False) - - - #demo.queue() # To use generator, required for streaming intermediate outputs - demo.launch(share=share_gradio) - - diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py deleted file mode 100644 index c60f62a7cdf3f5c5096a7a7e725e8268fddcb057..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py +++ /dev/null @@ -1,68 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='CascadeEncoderDecoder', - num_stages=2, - pretrained='open-mmlab://msra/hrnetv2_w18', - backbone=dict( - type='HRNet', - norm_cfg=norm_cfg, - norm_eval=False, - extra=dict( - stage1=dict( - num_modules=1, - num_branches=1, - block='BOTTLENECK', - num_blocks=(4, ), - num_channels=(64, )), - stage2=dict( - num_modules=1, - num_branches=2, - block='BASIC', - num_blocks=(4, 4), - num_channels=(18, 36)), - stage3=dict( - num_modules=4, - num_branches=3, - block='BASIC', - num_blocks=(4, 4, 4), - num_channels=(18, 36, 72)), - stage4=dict( - num_modules=3, - num_branches=4, - block='BASIC', - num_blocks=(4, 4, 4, 4), - num_channels=(18, 36, 72, 144)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[18, 36, 72, 144], - channels=sum([18, 36, 72, 144]), - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - kernel_size=1, - num_convs=1, - concat_input=False, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - 
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[18, 36, 72, 144], - in_index=(0, 1, 2, 3), - input_transform='resize_concat', - channels=512, - ocr_channels=256, - dropout_ratio=-1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - ], - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/resnet.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/resnet.py deleted file mode 100644 index 1cb3ac057ee2d52c46fc94685b5d4e698aad8d5f..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/cnn/resnet.py +++ /dev/null @@ -1,316 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import logging - -import torch.nn as nn -import torch.utils.checkpoint as cp - -from .utils import constant_init, kaiming_init - - -def conv3x3(in_planes, out_planes, stride=1, dilation=1): - """3x3 convolution with padding.""" - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=dilation, - dilation=dilation, - bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False): - super(BasicBlock, self).__init__() - assert style in ['pytorch', 'caffe'] - self.conv1 = conv3x3(inplanes, planes, stride, dilation) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - assert not with_cp - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, - inplanes, - planes, - stride=1, - dilation=1, - downsample=None, - style='pytorch', - with_cp=False): - """Bottleneck block. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. 
- """ - super(Bottleneck, self).__init__() - assert style in ['pytorch', 'caffe'] - if style == 'pytorch': - conv1_stride = 1 - conv2_stride = stride - else: - conv1_stride = stride - conv2_stride = 1 - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False) - self.conv2 = nn.Conv2d( - planes, - planes, - kernel_size=3, - stride=conv2_stride, - padding=dilation, - dilation=dilation, - bias=False) - - self.bn1 = nn.BatchNorm2d(planes) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d( - planes, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.dilation = dilation - self.with_cp = with_cp - - def forward(self, x): - - def _inner_forward(x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - out = self.relu(out) - - return out - - -def make_res_layer(block, - inplanes, - planes, - blocks, - stride=1, - dilation=1, - style='pytorch', - with_cp=False): - downsample = None - if stride != 1 or inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append( - block( - inplanes, - planes, - stride, - dilation, - downsample, - style=style, - with_cp=with_cp)) - inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append( - block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp)) - - return nn.Sequential(*layers) - - -class ResNet(nn.Module): - """ResNet backbone. - - Args: - depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. - num_stages (int): Resnet stages, normally 4. - strides (Sequence[int]): Strides of the first block of each stage. - dilations (Sequence[int]): Dilation of each stage. - out_indices (Sequence[int]): Output from which stages. - style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two - layer is the 3x3 conv layer, otherwise the stride-two layer is - the first 1x1 conv layer. - frozen_stages (int): Stages to be frozen (all param fixed). -1 means - not freezing any parameters. - bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze - running stats (mean and var). - bn_frozen (bool): Whether to freeze weight and bias of BN layers. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. 
- """ - - arch_settings = { - 18: (BasicBlock, (2, 2, 2, 2)), - 34: (BasicBlock, (3, 4, 6, 3)), - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, - depth, - num_stages=4, - strides=(1, 2, 2, 2), - dilations=(1, 1, 1, 1), - out_indices=(0, 1, 2, 3), - style='pytorch', - frozen_stages=-1, - bn_eval=True, - bn_frozen=False, - with_cp=False): - super(ResNet, self).__init__() - if depth not in self.arch_settings: - raise KeyError(f'invalid depth {depth} for resnet') - assert num_stages >= 1 and num_stages <= 4 - block, stage_blocks = self.arch_settings[depth] - stage_blocks = stage_blocks[:num_stages] - assert len(strides) == len(dilations) == num_stages - assert max(out_indices) < num_stages - - self.out_indices = out_indices - self.style = style - self.frozen_stages = frozen_stages - self.bn_eval = bn_eval - self.bn_frozen = bn_frozen - self.with_cp = with_cp - - self.inplanes = 64 - self.conv1 = nn.Conv2d( - 3, 64, kernel_size=7, stride=2, padding=3, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.res_layers = [] - for i, num_blocks in enumerate(stage_blocks): - stride = strides[i] - dilation = dilations[i] - planes = 64 * 2**i - res_layer = make_res_layer( - block, - self.inplanes, - planes, - num_blocks, - stride=stride, - dilation=dilation, - style=self.style, - with_cp=with_cp) - self.inplanes = planes * block.expansion - layer_name = f'layer{i + 1}' - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - - self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - from ..runner import load_checkpoint - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.BatchNorm2d): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - outs = [] - for i, layer_name in enumerate(self.res_layers): - res_layer = getattr(self, layer_name) - x = res_layer(x) - if i in self.out_indices: - outs.append(x) - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def train(self, mode=True): - super(ResNet, self).train(mode) - if self.bn_eval: - for m in self.modules(): - if isinstance(m, nn.BatchNorm2d): - m.eval() - if self.bn_frozen: - for params in m.parameters(): - params.requires_grad = False - if mode and self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for param in self.bn1.parameters(): - param.requires_grad = False - self.bn1.eval() - self.bn1.weight.requires_grad = False - self.bn1.bias.requires_grad = False - for i in range(1, self.frozen_stages + 1): - mod = getattr(self, f'layer{i}') - mod.eval() - for param in mod.parameters(): - param.requires_grad = False diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/losses/ae_loss.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/losses/ae_loss.py deleted file mode 100644 index cff472aa03080fb49dbb3adba6fec68647a575e6..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/models/losses/ae_loss.py +++ 
/dev/null @@ -1,102 +0,0 @@ -import mmcv -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..builder import LOSSES - - -@mmcv.jit(derivate=True, coderize=True) -def ae_loss_per_image(tl_preds, br_preds, match): - """Associative Embedding Loss in one image. - - Associative Embedding Loss including two parts: pull loss and push loss. - Pull loss makes embedding vectors from same object closer to each other. - Push loss distinguish embedding vector from different objects, and makes - the gap between them is large enough. - - During computing, usually there are 3 cases: - - no object in image: both pull loss and push loss will be 0. - - one object in image: push loss will be 0 and pull loss is computed - by the two corner of the only object. - - more than one objects in image: pull loss is computed by corner pairs - from each object, push loss is computed by each object with all - other objects. We use confusion matrix with 0 in diagonal to - compute the push loss. - - Args: - tl_preds (tensor): Embedding feature map of left-top corner. - br_preds (tensor): Embedding feature map of bottim-right corner. - match (list): Downsampled coordinates pair of each ground truth box. - """ - - tl_list, br_list, me_list = [], [], [] - if len(match) == 0: # no object in image - pull_loss = tl_preds.sum() * 0. - push_loss = tl_preds.sum() * 0. - else: - for m in match: - [tl_y, tl_x], [br_y, br_x] = m - tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1) - br_e = br_preds[:, br_y, br_x].view(-1, 1) - tl_list.append(tl_e) - br_list.append(br_e) - me_list.append((tl_e + br_e) / 2.0) - - tl_list = torch.cat(tl_list) - br_list = torch.cat(br_list) - me_list = torch.cat(me_list) - - assert tl_list.size() == br_list.size() - - # N is object number in image, M is dimension of embedding vector - N, M = tl_list.size() - - pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2) - pull_loss = pull_loss.sum() / N - - margin = 1 # exp setting of CornerNet, details in section 3.3 of paper - - # confusion matrix of push loss - conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list - conf_weight = 1 - torch.eye(N).type_as(me_list) - conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs()) - - if N > 1: # more than one object in current image - push_loss = F.relu(conf_mat).sum() / (N * (N - 1)) - else: - push_loss = tl_preds.sum() * 0. - - return pull_loss, push_loss - - -@LOSSES.register_module() -class AssociativeEmbeddingLoss(nn.Module): - """Associative Embedding Loss. - - More details can be found in - `Associative Embedding `_ and - `CornerNet `_ . - Code is modified from `kp_utils.py `_ # noqa: E501 - - Args: - pull_weight (float): Loss weight for corners from same object. - push_weight (float): Loss weight for corners from different object. 
- """ - - def __init__(self, pull_weight=0.25, push_weight=0.25): - super(AssociativeEmbeddingLoss, self).__init__() - self.pull_weight = pull_weight - self.push_weight = push_weight - - def forward(self, pred, target, match): - """Forward function.""" - batch = pred.size(0) - pull_all, push_all = 0.0, 0.0 - for i in range(batch): - pull, push = ae_loss_per_image(pred[i], target[i], match[i]) - - pull_all += self.pull_weight * pull - push_all += self.push_weight * push - - return pull_all, push_all diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/coder/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/coder/__init__.py deleted file mode 100644 index ae455ba8fc0e0727e2d581cdc8f20fceededf99a..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/coder/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -from .base_bbox_coder import BaseBBoxCoder -from .bucketing_bbox_coder import BucketingBBoxCoder -from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder -from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder -from .pseudo_bbox_coder import PseudoBBoxCoder -from .tblr_bbox_coder import TBLRBBoxCoder -from .yolo_bbox_coder import YOLOBBoxCoder - -__all__ = [ - 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', - 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', - 'BucketingBBoxCoder' -] diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/paa_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/paa_head.py deleted file mode 100644 index e067b0121cf8b8230c0c9c6b8cfd41f56be4e298..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/models/dense_heads/paa_head.py +++ /dev/null @@ -1,671 +0,0 @@ -import numpy as np -import torch -from mmcv.runner import force_fp32 - -from mmdet.core import multi_apply, multiclass_nms -from mmdet.core.bbox.iou_calculators import bbox_overlaps -from mmdet.models import HEADS -from mmdet.models.dense_heads import ATSSHead - -EPS = 1e-12 -try: - import sklearn.mixture as skm -except ImportError: - skm = None - - -def levels_to_images(mlvl_tensor): - """Concat multi-level feature maps by image. - - [feature_level0, feature_level1...] -> [feature_image0, feature_image1...] - Convert the shape of each element in mlvl_tensor from (N, C, H, W) to - (N, H*W , C), then split the element to N elements with shape (H*W, C), and - concat elements in same image of all level along first dimension. - - Args: - mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from - corresponding level. Each element is of shape (N, C, H, W) - - Returns: - list[torch.Tensor]: A list that contains N tensors and each tensor is - of shape (num_elements, C) - """ - batch_size = mlvl_tensor[0].size(0) - batch_list = [[] for _ in range(batch_size)] - channels = mlvl_tensor[0].size(1) - for t in mlvl_tensor: - t = t.permute(0, 2, 3, 1) - t = t.view(batch_size, -1, channels).contiguous() - for img in range(batch_size): - batch_list[img].append(t[img]) - return [torch.cat(item, 0) for item in batch_list] - - -@HEADS.register_module() -class PAAHead(ATSSHead): - """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU - Prediction for Object Detection. - - Code is modified from the `official github repo - `_. - - More details can be found in the `paper - `_ . 
- - Args: - topk (int): Select topk samples with smallest loss in - each level. - score_voting (bool): Whether to use score voting in post-process. - covariance_type : String describing the type of covariance parameters - to be used in :class:`sklearn.mixture.GaussianMixture`. - It must be one of: - - - 'full': each component has its own general covariance matrix - - 'tied': all components share the same general covariance matrix - - 'diag': each component has its own diagonal covariance matrix - - 'spherical': each component has its own single variance - Default: 'diag'. From 'full' to 'spherical', the gmm fitting - process is faster yet the performance could be influenced. For most - cases, 'diag' should be a good choice. - """ - - def __init__(self, - *args, - topk=9, - score_voting=True, - covariance_type='diag', - **kwargs): - # topk used in paa reassign process - self.topk = topk - self.with_score_voting = score_voting - self.covariance_type = covariance_type - super(PAAHead, self).__init__(*args, **kwargs) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds')) - def loss(self, - cls_scores, - bbox_preds, - iou_preds, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute losses of the head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W) - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W) - iou_preds (list[Tensor]): iou_preds for each scale - level with shape (N, num_anchors * 1, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (list[Tensor] | None): Specify which bounding - boxes can be ignored when are computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss gmm_assignment. 
- """ - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.anchor_generator.num_levels - - device = cls_scores[0].device - anchor_list, valid_flag_list = self.get_anchors( - featmap_sizes, img_metas, device=device) - label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 - cls_reg_targets = self.get_targets( - anchor_list, - valid_flag_list, - gt_bboxes, - img_metas, - gt_bboxes_ignore_list=gt_bboxes_ignore, - gt_labels_list=gt_labels, - label_channels=label_channels, - ) - (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds, - pos_gt_index) = cls_reg_targets - cls_scores = levels_to_images(cls_scores) - cls_scores = [ - item.reshape(-1, self.cls_out_channels) for item in cls_scores - ] - bbox_preds = levels_to_images(bbox_preds) - bbox_preds = [item.reshape(-1, 4) for item in bbox_preds] - iou_preds = levels_to_images(iou_preds) - iou_preds = [item.reshape(-1, 1) for item in iou_preds] - pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list, - cls_scores, bbox_preds, labels, - labels_weight, bboxes_target, - bboxes_weight, pos_inds) - - with torch.no_grad(): - reassign_labels, reassign_label_weight, \ - reassign_bbox_weights, num_pos = multi_apply( - self.paa_reassign, - pos_losses_list, - labels, - labels_weight, - bboxes_weight, - pos_inds, - pos_gt_index, - anchor_list) - num_pos = sum(num_pos) - # convert all tensor list to a flatten tensor - cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1)) - bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1)) - iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1)) - labels = torch.cat(reassign_labels, 0).view(-1) - flatten_anchors = torch.cat( - [torch.cat(item, 0) for item in anchor_list]) - labels_weight = torch.cat(reassign_label_weight, 0).view(-1) - bboxes_target = torch.cat(bboxes_target, - 0).view(-1, bboxes_target[0].size(-1)) - - pos_inds_flatten = ((labels >= 0) - & - (labels < self.num_classes)).nonzero().reshape(-1) - - losses_cls = self.loss_cls( - cls_scores, - labels, - labels_weight, - avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0 - if num_pos: - pos_bbox_pred = self.bbox_coder.decode( - flatten_anchors[pos_inds_flatten], - bbox_preds[pos_inds_flatten]) - pos_bbox_target = bboxes_target[pos_inds_flatten] - iou_target = bbox_overlaps( - pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True) - losses_iou = self.loss_centerness( - iou_preds[pos_inds_flatten], - iou_target.unsqueeze(-1), - avg_factor=num_pos) - losses_bbox = self.loss_bbox( - pos_bbox_pred, - pos_bbox_target, - iou_target.clamp(min=EPS), - avg_factor=iou_target.sum()) - else: - losses_iou = iou_preds.sum() * 0 - losses_bbox = bbox_preds.sum() * 0 - - return dict( - loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou) - - def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight, - bbox_target, bbox_weight, pos_inds): - """Calculate loss of all potential positive samples obtained from first - match process. - - Args: - anchors (list[Tensor]): Anchors of each scale. - cls_score (Tensor): Box scores of single image with shape - (num_anchors, num_classes) - bbox_pred (Tensor): Box energies / deltas of single image - with shape (num_anchors, 4) - label (Tensor): classification target of each anchor with - shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each - anchor with shape (num_anchors). 
- bbox_target (dict): Regression target of each anchor with - shape (num_anchors, 4). - bbox_weight (Tensor): Bbox weight of each anchor with shape - (num_anchors, 4). - pos_inds (Tensor): Index of all positive samples got from - first assign process. - - Returns: - Tensor: Losses of all positive samples in single image. - """ - if not len(pos_inds): - return cls_score.new([]), - anchors_all_level = torch.cat(anchors, 0) - pos_scores = cls_score[pos_inds] - pos_bbox_pred = bbox_pred[pos_inds] - pos_label = label[pos_inds] - pos_label_weight = label_weight[pos_inds] - pos_bbox_target = bbox_target[pos_inds] - pos_bbox_weight = bbox_weight[pos_inds] - pos_anchors = anchors_all_level[pos_inds] - pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred) - - # to keep loss dimension - loss_cls = self.loss_cls( - pos_scores, - pos_label, - pos_label_weight, - avg_factor=self.loss_cls.loss_weight, - reduction_override='none') - - loss_bbox = self.loss_bbox( - pos_bbox_pred, - pos_bbox_target, - pos_bbox_weight, - avg_factor=self.loss_cls.loss_weight, - reduction_override='none') - - loss_cls = loss_cls.sum(-1) - pos_loss = loss_bbox + loss_cls - return pos_loss, - - def paa_reassign(self, pos_losses, label, label_weight, bbox_weight, - pos_inds, pos_gt_inds, anchors): - """Fit loss to GMM distribution and separate positive, ignore, negative - samples again with GMM model. - - Args: - pos_losses (Tensor): Losses of all positive samples in - single image. - label (Tensor): classification target of each anchor with - shape (num_anchors,) - label_weight (Tensor): Classification loss weight of each - anchor with shape (num_anchors). - bbox_weight (Tensor): Bbox weight of each anchor with shape - (num_anchors, 4). - pos_inds (Tensor): Index of all positive samples got from - first assign process. - pos_gt_inds (Tensor): Gt_index of all positive samples got - from first assign process. - anchors (list[Tensor]): Anchors of each scale. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - label (Tensor): classification target of each anchor after - paa assign, with shape (num_anchors,) - - label_weight (Tensor): Classification loss weight of each - anchor after paa assign, with shape (num_anchors). - - bbox_weight (Tensor): Bbox weight of each anchor with shape - (num_anchors, 4). - - num_pos (int): The number of positive samples after paa - assign. 
- """ - if not len(pos_inds): - return label, label_weight, bbox_weight, 0 - label = label.clone() - label_weight = label_weight.clone() - bbox_weight = bbox_weight.clone() - num_gt = pos_gt_inds.max() + 1 - num_level = len(anchors) - num_anchors_each_level = [item.size(0) for item in anchors] - num_anchors_each_level.insert(0, 0) - inds_level_interval = np.cumsum(num_anchors_each_level) - pos_level_mask = [] - for i in range(num_level): - mask = (pos_inds >= inds_level_interval[i]) & ( - pos_inds < inds_level_interval[i + 1]) - pos_level_mask.append(mask) - pos_inds_after_paa = [label.new_tensor([])] - ignore_inds_after_paa = [label.new_tensor([])] - for gt_ind in range(num_gt): - pos_inds_gmm = [] - pos_loss_gmm = [] - gt_mask = pos_gt_inds == gt_ind - for level in range(num_level): - level_mask = pos_level_mask[level] - level_gt_mask = level_mask & gt_mask - value, topk_inds = pos_losses[level_gt_mask].topk( - min(level_gt_mask.sum(), self.topk), largest=False) - pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds]) - pos_loss_gmm.append(value) - pos_inds_gmm = torch.cat(pos_inds_gmm) - pos_loss_gmm = torch.cat(pos_loss_gmm) - # fix gmm need at least two sample - if len(pos_inds_gmm) < 2: - continue - device = pos_inds_gmm.device - pos_loss_gmm, sort_inds = pos_loss_gmm.sort() - pos_inds_gmm = pos_inds_gmm[sort_inds] - pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy() - min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max() - means_init = np.array([min_loss, max_loss]).reshape(2, 1) - weights_init = np.array([0.5, 0.5]) - precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full - if self.covariance_type == 'spherical': - precisions_init = precisions_init.reshape(2) - elif self.covariance_type == 'diag': - precisions_init = precisions_init.reshape(2, 1) - elif self.covariance_type == 'tied': - precisions_init = np.array([[1.0]]) - if skm is None: - raise ImportError('Please run "pip install sklearn" ' - 'to install sklearn first.') - gmm = skm.GaussianMixture( - 2, - weights_init=weights_init, - means_init=means_init, - precisions_init=precisions_init, - covariance_type=self.covariance_type) - gmm.fit(pos_loss_gmm) - gmm_assignment = gmm.predict(pos_loss_gmm) - scores = gmm.score_samples(pos_loss_gmm) - gmm_assignment = torch.from_numpy(gmm_assignment).to(device) - scores = torch.from_numpy(scores).to(device) - - pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme( - gmm_assignment, scores, pos_inds_gmm) - pos_inds_after_paa.append(pos_inds_temp) - ignore_inds_after_paa.append(ignore_inds_temp) - - pos_inds_after_paa = torch.cat(pos_inds_after_paa) - ignore_inds_after_paa = torch.cat(ignore_inds_after_paa) - reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1) - reassign_ids = pos_inds[reassign_mask] - label[reassign_ids] = self.num_classes - label_weight[ignore_inds_after_paa] = 0 - bbox_weight[reassign_ids] = 0 - num_pos = len(pos_inds_after_paa) - return label, label_weight, bbox_weight, num_pos - - def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm): - """A general separation scheme for gmm model. - - It separates a GMM distribution of candidate samples into three - parts, 0 1 and uncertain areas, and you can implement other - separation schemes by rewriting this function. - - Args: - gmm_assignment (Tensor): The prediction of GMM which is of shape - (num_samples,). The 0/1 value indicates the distribution - that each sample comes from. - scores (Tensor): The probability of sample coming from the - fit GMM distribution. 
The tensor is of shape (num_samples,). - pos_inds_gmm (Tensor): All the indexes of samples which are used - to fit GMM model. The tensor is of shape (num_samples,) - - Returns: - tuple[Tensor]: The indices of positive and ignored samples. - - - pos_inds_temp (Tensor): Indices of positive samples. - - ignore_inds_temp (Tensor): Indices of ignore samples. - """ - # The implementation is (c) in Fig.3 in origin paper instead of (b). - # You can refer to issues such as - # https://github.com/kkhoot/PAA/issues/8 and - # https://github.com/kkhoot/PAA/issues/9. - fgs = gmm_assignment == 0 - pos_inds_temp = fgs.new_tensor([], dtype=torch.long) - ignore_inds_temp = fgs.new_tensor([], dtype=torch.long) - if fgs.nonzero().numel(): - _, pos_thr_ind = scores[fgs].topk(1) - pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1] - ignore_inds_temp = pos_inds_gmm.new_tensor([]) - return pos_inds_temp, ignore_inds_temp - - def get_targets( - self, - anchor_list, - valid_flag_list, - gt_bboxes_list, - img_metas, - gt_bboxes_ignore_list=None, - gt_labels_list=None, - label_channels=1, - unmap_outputs=True, - ): - """Get targets for PAA head. - - This method is almost the same as `AnchorHead.get_targets()`. We direct - return the results from _get_targets_single instead map it to levels - by images_to_levels function. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, 4). - valid_flag_list (list[list[Tensor]]): Multi level valid flags of - each image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - img_metas (list[dict]): Meta info of each image. - gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be - ignored. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - label_channels (int): Channel of label. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - - labels (list[Tensor]): Labels of all anchors, each with - shape (num_anchors,). - - label_weights (list[Tensor]): Label weights of all anchor. - each with shape (num_anchors,). - - bbox_targets (list[Tensor]): BBox targets of all anchors. - each with shape (num_anchors, 4). - - bbox_weights (list[Tensor]): BBox weights of all anchors. - each with shape (num_anchors, 4). - - pos_inds (list[Tensor]): Contains all index of positive - sample in all anchor. - - gt_inds (list[Tensor]): Contains all gt_index of positive - sample in all anchor. 
- """ - - num_imgs = len(img_metas) - assert len(anchor_list) == len(valid_flag_list) == num_imgs - concat_anchor_list = [] - concat_valid_flag_list = [] - for i in range(num_imgs): - assert len(anchor_list[i]) == len(valid_flag_list[i]) - concat_anchor_list.append(torch.cat(anchor_list[i])) - concat_valid_flag_list.append(torch.cat(valid_flag_list[i])) - - # compute targets for each image - if gt_bboxes_ignore_list is None: - gt_bboxes_ignore_list = [None for _ in range(num_imgs)] - if gt_labels_list is None: - gt_labels_list = [None for _ in range(num_imgs)] - results = multi_apply( - self._get_targets_single, - concat_anchor_list, - concat_valid_flag_list, - gt_bboxes_list, - gt_bboxes_ignore_list, - gt_labels_list, - img_metas, - label_channels=label_channels, - unmap_outputs=unmap_outputs) - - (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds, - valid_neg_inds, sampling_result) = results - - # Due to valid flag of anchors, we have to calculate the real pos_inds - # in origin anchor set. - pos_inds = [] - for i, single_labels in enumerate(labels): - pos_mask = (0 <= single_labels) & ( - single_labels < self.num_classes) - pos_inds.append(pos_mask.nonzero().view(-1)) - - gt_inds = [item.pos_assigned_gt_inds for item in sampling_result] - return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, - gt_inds) - - def _get_targets_single(self, - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True): - """Compute regression and classification targets for anchors in a - single image. - - This method is same as `AnchorHead._get_targets_single()`. - """ - assert unmap_outputs, 'We must map outputs back to the original' \ - 'set of anchors in PAAhead' - return super(ATSSHead, self)._get_targets_single( - flat_anchors, - valid_flags, - gt_bboxes, - gt_bboxes_ignore, - gt_labels, - img_meta, - label_channels=1, - unmap_outputs=True) - - def _get_bboxes(self, - cls_scores, - bbox_preds, - iou_preds, - mlvl_anchors, - img_shapes, - scale_factors, - cfg, - rescale=False, - with_nms=True): - """Transform outputs for a single batch item into labeled boxes. - - This method is almost same as `ATSSHead._get_bboxes()`. - We use sqrt(iou_preds * cls_scores) in NMS process instead of just - cls_scores. Besides, score voting is used when `` score_voting`` - is set to True. 
- """ - assert with_nms, 'PAA only supports "with_nms=True" now' - assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) - batch_size = cls_scores[0].shape[0] - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_iou_preds = [] - for cls_score, bbox_pred, iou_preds, anchors in zip( - cls_scores, bbox_preds, iou_preds, mlvl_anchors): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - - scores = cls_score.permute(0, 2, 3, 1).reshape( - batch_size, -1, self.cls_out_channels).sigmoid() - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(batch_size, -1, 4) - iou_preds = iou_preds.permute(0, 2, 3, 1).reshape(batch_size, - -1).sigmoid() - - nms_pre = cfg.get('nms_pre', -1) - if nms_pre > 0 and scores.shape[1] > nms_pre: - max_scores, _ = (scores * iou_preds[..., None]).sqrt().max(-1) - _, topk_inds = max_scores.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - anchors = anchors[topk_inds, :] - bbox_pred = bbox_pred[batch_inds, topk_inds, :] - scores = scores[batch_inds, topk_inds, :] - iou_preds = iou_preds[batch_inds, topk_inds] - else: - anchors = anchors.expand_as(bbox_pred) - - bboxes = self.bbox_coder.decode( - anchors, bbox_pred, max_shape=img_shapes) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_iou_preds.append(iou_preds) - - batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1) - if rescale: - batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor( - scale_factors).unsqueeze(1) - batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) - # Add a dummy background class to the backend when using sigmoid - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = batch_mlvl_scores.new_zeros(batch_size, - batch_mlvl_scores.shape[1], 1) - batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1) - batch_mlvl_iou_preds = torch.cat(mlvl_iou_preds, dim=1) - batch_mlvl_nms_scores = (batch_mlvl_scores * - batch_mlvl_iou_preds[..., None]).sqrt() - - det_results = [] - for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes, - batch_mlvl_nms_scores): - det_bbox, det_label = multiclass_nms( - mlvl_bboxes, - mlvl_scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=None) - if self.with_score_voting and len(det_bbox) > 0: - det_bbox, det_label = self.score_voting( - det_bbox, det_label, mlvl_bboxes, mlvl_scores, - cfg.score_thr) - det_results.append(tuple([det_bbox, det_label])) - - return det_results - - def score_voting(self, det_bboxes, det_labels, mlvl_bboxes, - mlvl_nms_scores, score_thr): - """Implementation of score voting method works on each remaining boxes - after NMS procedure. - - Args: - det_bboxes (Tensor): Remaining boxes after NMS procedure, - with shape (k, 5), each dimension means - (x1, y1, x2, y2, score). - det_labels (Tensor): The label of remaining boxes, with shape - (k, 1),Labels are 0-based. - mlvl_bboxes (Tensor): All boxes before the NMS procedure, - with shape (num_anchors,4). - mlvl_nms_scores (Tensor): The scores of all boxes which is used - in the NMS procedure, with shape (num_anchors, num_class) - mlvl_iou_preds (Tensor): The predictions of IOU of all boxes - before the NMS procedure, with shape (num_anchors, 1) - score_thr (float): The score threshold of bboxes. - - Returns: - tuple: Usually returns a tuple containing voting results. - - - det_bboxes_voted (Tensor): Remaining boxes after - score voting procedure, with shape (k, 5), each - dimension means (x1, y1, x2, y2, score). 
- - det_labels_voted (Tensor): Label of remaining bboxes - after voting, with shape (num_anchors,). - """ - candidate_mask = mlvl_nms_scores > score_thr - candidate_mask_nonzeros = candidate_mask.nonzero() - candidate_inds = candidate_mask_nonzeros[:, 0] - candidate_labels = candidate_mask_nonzeros[:, 1] - candidate_bboxes = mlvl_bboxes[candidate_inds] - candidate_scores = mlvl_nms_scores[candidate_mask] - det_bboxes_voted = [] - det_labels_voted = [] - for cls in range(self.cls_out_channels): - candidate_cls_mask = candidate_labels == cls - if not candidate_cls_mask.any(): - continue - candidate_cls_scores = candidate_scores[candidate_cls_mask] - candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask] - det_cls_mask = det_labels == cls - det_cls_bboxes = det_bboxes[det_cls_mask].view( - -1, det_bboxes.size(-1)) - det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4], - candidate_cls_bboxes) - for det_ind in range(len(det_cls_bboxes)): - single_det_ious = det_candidate_ious[det_ind] - pos_ious_mask = single_det_ious > 0.01 - pos_ious = single_det_ious[pos_ious_mask] - pos_bboxes = candidate_cls_bboxes[pos_ious_mask] - pos_scores = candidate_cls_scores[pos_ious_mask] - pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) * - pos_scores)[:, None] - voted_box = torch.sum( - pis * pos_bboxes, dim=0) / torch.sum( - pis, dim=0) - voted_score = det_cls_bboxes[det_ind][-1:][None, :] - det_bboxes_voted.append( - torch.cat((voted_box[None, :], voted_score), dim=1)) - det_labels_voted.append(cls) - - det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0) - det_labels_voted = det_labels.new_tensor(det_labels_voted) - return det_bboxes_voted, det_labels_voted diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/parallel/_functions.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/parallel/_functions.py deleted file mode 100644 index 9b5a8a44483ab991411d07122b22a1d027e4be8e..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/parallel/_functions.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -from torch.nn.parallel._functions import _get_stream - - -def scatter(input, devices, streams=None): - """Scatters tensor across multiple GPUs.""" - if streams is None: - streams = [None] * len(devices) - - if isinstance(input, list): - chunk_size = (len(input) - 1) // len(devices) + 1 - outputs = [ - scatter(input[i], [devices[i // chunk_size]], - [streams[i // chunk_size]]) for i in range(len(input)) - ] - return outputs - elif isinstance(input, torch.Tensor): - output = input.contiguous() - # TODO: copy to a pinned buffer first (if copying from CPU) - stream = streams[0] if output.numel() > 0 else None - if devices != [-1]: - with torch.cuda.device(devices[0]), torch.cuda.stream(stream): - output = output.cuda(devices[0], non_blocking=True) - else: - # unsqueeze the first dimension thus the tensor's shape is the - # same as those scattered with GPU. 
- output = output.unsqueeze(0) - return output - else: - raise Exception(f'Unknown type {type(input)}.') - - -def synchronize_stream(output, devices, streams): - if isinstance(output, list): - chunk_size = len(output) // len(devices) - for i in range(len(devices)): - for j in range(chunk_size): - synchronize_stream(output[i * chunk_size + j], [devices[i]], - [streams[i]]) - elif isinstance(output, torch.Tensor): - if output.numel() != 0: - with torch.cuda.device(devices[0]): - main_stream = torch.cuda.current_stream() - main_stream.wait_stream(streams[0]) - output.record_stream(main_stream) - else: - raise Exception(f'Unknown type {type(output)}.') - - -def get_input_device(input): - if isinstance(input, list): - for item in input: - input_device = get_input_device(item) - if input_device != -1: - return input_device - return -1 - elif isinstance(input, torch.Tensor): - return input.get_device() if input.is_cuda else -1 - else: - raise Exception(f'Unknown type {type(input)}.') - - -class Scatter: - - @staticmethod - def forward(target_gpus, input): - input_device = get_input_device(input) - streams = None - if input_device == -1 and target_gpus != [-1]: - # Perform CPU to GPU copies in a background stream - streams = [_get_stream(device) for device in target_gpus] - - outputs = scatter(input, target_gpus, streams) - # Synchronize with the copy stream - if streams is not None: - synchronize_stream(outputs, target_gpus, streams) - - return tuple(outputs) diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/layers/__init__.py b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/layers/__init__.py deleted file mode 100644 index ac0b7f142ce105f662f69f3e0c5d4967b5c86c22..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/parallel_wavegan/layers/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .causal_conv import * # NOQA -from .pqmf import * # NOQA -from .residual_block import * # NOQA -from .residual_stack import * # NOQA -from .tade_res_block import * # NOQA -from .upsample import * # NOQA diff --git a/spaces/alaka/tinder-data-explorer/README.md b/spaces/alaka/tinder-data-explorer/README.md deleted file mode 100644 index 3b1a6edd64b59e3db61b5848414c14b04cfd3ac9..0000000000000000000000000000000000000000 --- a/spaces/alaka/tinder-data-explorer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Demo Space -emoji: 🤗 -colorFrom: yellow -colorTo: orange -sdk: gradio -sdk_version: 3.7 -app_file: app.py -pinned: false ---- - -# Rest of the readme -# conda env: gradio diff --git a/spaces/albertvillanova/datasets-tagging/README.md b/spaces/albertvillanova/datasets-tagging/README.md deleted file mode 100644 index b7735070e2fbf2f995d70ed291a19bdb8a4de160..0000000000000000000000000000000000000000 --- a/spaces/albertvillanova/datasets-tagging/README.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: Datasets Tagging -emoji: 🤗 -colorFrom: pink -colorTo: blue -sdk: streamlit -app_file: tagging_app.py -pinned: false ---- - -# datasets-tagging -A Streamlit app to add structured tags to a dataset card. -Available online [here!](https://huggingface.co/spaces/huggingface/datasets-tagging) - - -1. `pip install -r requirements.txt` -2. `./build_metadata_file.py` will build an up-to-date metadata file from the `datasets/` repo (clones it locally) -3. `streamlit run tagging_app.py` - -This will give you a `localhost` link you can click to open in your browser. - -The app initialization on the first run takes a few minutes, subsequent runs are faster. 
- -Make sure to hit the `Done? Save to File!` button in the right column when you're done tagging a config! diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py deleted file mode 100644 index 70369b9d663414c24f1e042c5b30a8f8c7bbd2b2..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/version.py +++ /dev/null @@ -1,9 +0,0 @@ -""" -This module exists only to simplify retrieving the version number of chardet -from within setup.py and from chardet subpackages. - -:author: Dan Blanchard (dan.blanchard@gmail.com) -""" - -__version__ = "4.0.0" -VERSION = __version__.split('.') diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/idna/codec.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/idna/codec.py deleted file mode 100644 index 1ca9ba62c208527b796b49306f4b8c95eb868a51..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/idna/codec.py +++ /dev/null @@ -1,112 +0,0 @@ -from .core import encode, decode, alabel, ulabel, IDNAError -import codecs -import re -from typing import Tuple, Optional - -_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]') - -class Codec(codecs.Codec): - - def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]: - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return b"", 0 - - return encode(data), len(data) - - def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]: - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return '', 0 - - return decode(data), len(data) - -class IncrementalEncoder(codecs.BufferedIncrementalEncoder): - def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return "", 0 - - labels = _unicode_dots_re.split(data) - trailing_dot = '' - if labels: - if not labels[-1]: - trailing_dot = '.' - del labels[-1] - elif not final: - # Keep potentially unfinished label until the next call - del labels[-1] - if labels: - trailing_dot = '.' - - result = [] - size = 0 - for label in labels: - result.append(alabel(label)) - if size: - size += 1 - size += len(label) - - # Join with U+002E - result_str = '.'.join(result) + trailing_dot # type: ignore - size += len(trailing_dot) - return result_str, size - -class IncrementalDecoder(codecs.BufferedIncrementalDecoder): - def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore - if errors != 'strict': - raise IDNAError('Unsupported error handling \"{}\"'.format(errors)) - - if not data: - return ('', 0) - - labels = _unicode_dots_re.split(data) - trailing_dot = '' - if labels: - if not labels[-1]: - trailing_dot = '.' - del labels[-1] - elif not final: - # Keep potentially unfinished label until the next call - del labels[-1] - if labels: - trailing_dot = '.' 
- - result = [] - size = 0 - for label in labels: - result.append(ulabel(label)) - if size: - size += 1 - size += len(label) - - result_str = '.'.join(result) + trailing_dot - size += len(trailing_dot) - return (result_str, size) - - -class StreamWriter(Codec, codecs.StreamWriter): - pass - - -class StreamReader(Codec, codecs.StreamReader): - pass - - -def getregentry() -> codecs.CodecInfo: - # Compatibility as a search_function for codecs.register() - return codecs.CodecInfo( - name='idna', - encode=Codec().encode, # type: ignore - decode=Codec().decode, # type: ignore - incrementalencoder=IncrementalEncoder, - incrementaldecoder=IncrementalDecoder, - streamwriter=StreamWriter, - streamreader=StreamReader, - ) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py deleted file mode 100644 index 954a4ab05e9b8295f6d455a339654779ee7ec3c8..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pep517/in_process/_in_process.py +++ /dev/null @@ -1,363 +0,0 @@ -"""This is invoked in a subprocess to call the build backend hooks. - -It expects: -- Command line args: hook_name, control_dir -- Environment variables: - PEP517_BUILD_BACKEND=entry.point:spec - PEP517_BACKEND_PATH=paths (separated with os.pathsep) -- control_dir/input.json: - - {"kwargs": {...}} - -Results: -- control_dir/output.json - - {"return_val": ...} -""" -from glob import glob -from importlib import import_module -import json -import os -import os.path -from os.path import join as pjoin -import re -import shutil -import sys -import traceback - -# This file is run as a script, and `import compat` is not zip-safe, so we -# include write_json() and read_json() from compat.py. -# -# Handle reading and writing JSON in UTF-8, on Python 3 and 2. - -if sys.version_info[0] >= 3: - # Python 3 - def write_json(obj, path, **kwargs): - with open(path, 'w', encoding='utf-8') as f: - json.dump(obj, f, **kwargs) - - def read_json(path): - with open(path, 'r', encoding='utf-8') as f: - return json.load(f) - -else: - # Python 2 - def write_json(obj, path, **kwargs): - with open(path, 'wb') as f: - json.dump(obj, f, encoding='utf-8', **kwargs) - - def read_json(path): - with open(path, 'rb') as f: - return json.load(f) - - -class BackendUnavailable(Exception): - """Raised if we cannot import the backend""" - def __init__(self, traceback): - self.traceback = traceback - - -class BackendInvalid(Exception): - """Raised if the backend is invalid""" - def __init__(self, message): - self.message = message - - -class HookMissing(Exception): - """Raised if a hook is missing and we are not executing the fallback""" - def __init__(self, hook_name=None): - super(HookMissing, self).__init__(hook_name) - self.hook_name = hook_name - - -def contained_in(filename, directory): - """Test if a file is located within the given directory.""" - filename = os.path.normcase(os.path.abspath(filename)) - directory = os.path.normcase(os.path.abspath(directory)) - return os.path.commonprefix([filename, directory]) == directory - - -def _build_backend(): - """Find and load the build backend""" - # Add in-tree backend directories to the front of sys.path. 
- backend_path = os.environ.get('PEP517_BACKEND_PATH') - if backend_path: - extra_pathitems = backend_path.split(os.pathsep) - sys.path[:0] = extra_pathitems - - ep = os.environ['PEP517_BUILD_BACKEND'] - mod_path, _, obj_path = ep.partition(':') - try: - obj = import_module(mod_path) - except ImportError: - raise BackendUnavailable(traceback.format_exc()) - - if backend_path: - if not any( - contained_in(obj.__file__, path) - for path in extra_pathitems - ): - raise BackendInvalid("Backend was not loaded from backend-path") - - if obj_path: - for path_part in obj_path.split('.'): - obj = getattr(obj, path_part) - return obj - - -def _supported_features(): - """Return the list of options features supported by the backend. - - Returns a list of strings. - The only possible value is 'build_editable'. - """ - backend = _build_backend() - features = [] - if hasattr(backend, "build_editable"): - features.append("build_editable") - return features - - -def get_requires_for_build_wheel(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_wheel - except AttributeError: - return [] - else: - return hook(config_settings) - - -def get_requires_for_build_editable(config_settings): - """Invoke the optional get_requires_for_build_editable hook - - Returns [] if the hook is not defined. - """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_editable - except AttributeError: - return [] - else: - return hook(config_settings) - - -def prepare_metadata_for_build_wheel( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_wheel - - Implements a fallback by building a wheel if the hook isn't defined, - unless _allow_fallback is False in which case HookMissing is raised. - """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_wheel - except AttributeError: - if not _allow_fallback: - raise HookMissing() - whl_basename = backend.build_wheel(metadata_directory, config_settings) - return _get_wheel_metadata_from_wheel(whl_basename, metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -def prepare_metadata_for_build_editable( - metadata_directory, config_settings, _allow_fallback): - """Invoke optional prepare_metadata_for_build_editable - - Implements a fallback by building an editable wheel if the hook isn't - defined, unless _allow_fallback is False in which case HookMissing is - raised. 
- """ - backend = _build_backend() - try: - hook = backend.prepare_metadata_for_build_editable - except AttributeError: - if not _allow_fallback: - raise HookMissing() - try: - build_hook = backend.build_editable - except AttributeError: - raise HookMissing(hook_name='build_editable') - else: - whl_basename = build_hook(metadata_directory, config_settings) - return _get_wheel_metadata_from_wheel(whl_basename, - metadata_directory, - config_settings) - else: - return hook(metadata_directory, config_settings) - - -WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL' - - -def _dist_info_files(whl_zip): - """Identify the .dist-info folder inside a wheel ZipFile.""" - res = [] - for path in whl_zip.namelist(): - m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path) - if m: - res.append(path) - if res: - return res - raise Exception("No .dist-info folder found in wheel") - - -def _get_wheel_metadata_from_wheel( - whl_basename, metadata_directory, config_settings): - """Extract the metadata from a wheel. - - Fallback for when the build backend does not - define the 'get_wheel_metadata' hook. - """ - from zipfile import ZipFile - with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'): - pass # Touch marker file - - whl_file = os.path.join(metadata_directory, whl_basename) - with ZipFile(whl_file) as zipf: - dist_info = _dist_info_files(zipf) - zipf.extractall(path=metadata_directory, members=dist_info) - return dist_info[0].split('/')[0] - - -def _find_already_built_wheel(metadata_directory): - """Check for a wheel already built during the get_wheel_metadata hook. - """ - if not metadata_directory: - return None - metadata_parent = os.path.dirname(metadata_directory) - if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)): - return None - - whl_files = glob(os.path.join(metadata_parent, '*.whl')) - if not whl_files: - print('Found wheel built marker, but no .whl files') - return None - if len(whl_files) > 1: - print('Found multiple .whl files; unspecified behaviour. ' - 'Will call build_wheel.') - return None - - # Exactly one .whl file - return whl_files[0] - - -def build_wheel(wheel_directory, config_settings, metadata_directory=None): - """Invoke the mandatory build_wheel hook. - - If a wheel was already built in the - prepare_metadata_for_build_wheel fallback, this - will copy it rather than rebuilding the wheel. - """ - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return _build_backend().build_wheel(wheel_directory, config_settings, - metadata_directory) - - -def build_editable(wheel_directory, config_settings, metadata_directory=None): - """Invoke the optional build_editable hook. - - If a wheel was already built in the - prepare_metadata_for_build_editable fallback, this - will copy it rather than rebuilding the wheel. - """ - backend = _build_backend() - try: - hook = backend.build_editable - except AttributeError: - raise HookMissing() - else: - prebuilt_whl = _find_already_built_wheel(metadata_directory) - if prebuilt_whl: - shutil.copy2(prebuilt_whl, wheel_directory) - return os.path.basename(prebuilt_whl) - - return hook(wheel_directory, config_settings, metadata_directory) - - -def get_requires_for_build_sdist(config_settings): - """Invoke the optional get_requires_for_build_wheel hook - - Returns [] if the hook is not defined. 
- """ - backend = _build_backend() - try: - hook = backend.get_requires_for_build_sdist - except AttributeError: - return [] - else: - return hook(config_settings) - - -class _DummyException(Exception): - """Nothing should ever raise this exception""" - - -class GotUnsupportedOperation(Exception): - """For internal use when backend raises UnsupportedOperation""" - def __init__(self, traceback): - self.traceback = traceback - - -def build_sdist(sdist_directory, config_settings): - """Invoke the mandatory build_sdist hook.""" - backend = _build_backend() - try: - return backend.build_sdist(sdist_directory, config_settings) - except getattr(backend, 'UnsupportedOperation', _DummyException): - raise GotUnsupportedOperation(traceback.format_exc()) - - -HOOK_NAMES = { - 'get_requires_for_build_wheel', - 'prepare_metadata_for_build_wheel', - 'build_wheel', - 'get_requires_for_build_editable', - 'prepare_metadata_for_build_editable', - 'build_editable', - 'get_requires_for_build_sdist', - 'build_sdist', - '_supported_features', -} - - -def main(): - if len(sys.argv) < 3: - sys.exit("Needs args: hook_name, control_dir") - hook_name = sys.argv[1] - control_dir = sys.argv[2] - if hook_name not in HOOK_NAMES: - sys.exit("Unknown hook: %s" % hook_name) - hook = globals()[hook_name] - - hook_input = read_json(pjoin(control_dir, 'input.json')) - - json_out = {'unsupported': False, 'return_val': None} - try: - json_out['return_val'] = hook(**hook_input['kwargs']) - except BackendUnavailable as e: - json_out['no_backend'] = True - json_out['traceback'] = e.traceback - except BackendInvalid as e: - json_out['backend_invalid'] = True - json_out['backend_error'] = e.message - except GotUnsupportedOperation as e: - json_out['unsupported'] = True - json_out['traceback'] = e.traceback - except HookMissing as e: - json_out['hook_missing'] = True - json_out['missing_hook_name'] = e.hook_name or hook_name - - write_json(json_out, pjoin(control_dir, 'output.json'), indent=2) - - -if __name__ == '__main__': - main() diff --git a/spaces/ali-ghamdan/gfp-Gans/gfpgan/archs/gfpgan_bilinear_arch.py b/spaces/ali-ghamdan/gfp-Gans/gfpgan/archs/gfpgan_bilinear_arch.py deleted file mode 100644 index 52e0de88de8543cf4afdc3988c4cdfc7c7060687..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/gfp-Gans/gfpgan/archs/gfpgan_bilinear_arch.py +++ /dev/null @@ -1,312 +0,0 @@ -import math -import random -import torch -from basicsr.utils.registry import ARCH_REGISTRY -from torch import nn - -from .gfpganv1_arch import ResUpBlock -from .stylegan2_bilinear_arch import (ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU, - StyleGAN2GeneratorBilinear) - - -class StyleGAN2GeneratorBilinearSFT(StyleGAN2GeneratorBilinear): - """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). - - It is the bilinear version. It does not use the complicated UpFirDnSmooth function that is not friendly for - deployment. It can be easily converted to the clean version: StyleGAN2GeneratorCSFT. - - Args: - out_size (int): The spatial size of outputs. - num_style_feat (int): Channel number of style features. Default: 512. - num_mlp (int): Layer number of MLP style layers. Default: 8. - channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. - lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. - narrow (float): The narrow ratio for channels. Default: 1. - sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. 
- """ - - def __init__(self, - out_size, - num_style_feat=512, - num_mlp=8, - channel_multiplier=2, - lr_mlp=0.01, - narrow=1, - sft_half=False): - super(StyleGAN2GeneratorBilinearSFT, self).__init__( - out_size, - num_style_feat=num_style_feat, - num_mlp=num_mlp, - channel_multiplier=channel_multiplier, - lr_mlp=lr_mlp, - narrow=narrow) - self.sft_half = sft_half - - def forward(self, - styles, - conditions, - input_is_latent=False, - noise=None, - randomize_noise=True, - truncation=1, - truncation_latent=None, - inject_index=None, - return_latents=False): - """Forward function for StyleGAN2GeneratorBilinearSFT. - - Args: - styles (list[Tensor]): Sample codes of styles. - conditions (list[Tensor]): SFT conditions to generators. - input_is_latent (bool): Whether input is latent style. Default: False. - noise (Tensor | None): Input noise or None. Default: None. - randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. - truncation (float): The truncation ratio. Default: 1. - truncation_latent (Tensor | None): The truncation latent tensor. Default: None. - inject_index (int | None): The injection index for mixing noise. Default: None. - return_latents (bool): Whether to return style latents. Default: False. - """ - # style codes -> latents with Style MLP layer - if not input_is_latent: - styles = [self.style_mlp(s) for s in styles] - # noises - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers # for each style conv layer - else: # use the stored noise - noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)] - # style truncation - if truncation < 1: - style_truncation = [] - for style in styles: - style_truncation.append(truncation_latent + truncation * (style - truncation_latent)) - styles = style_truncation - # get style latents with injection - if len(styles) == 1: - inject_index = self.num_latent - - if styles[0].ndim < 3: - # repeat latent code for all the layers - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: # used for encoder with different latent code for each layer - latent = styles[0] - elif len(styles) == 2: # mixing noises - if inject_index is None: - inject_index = random.randint(1, self.num_latent - 1) - latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) - latent = torch.cat([latent1, latent2], 1) - - # main generation - out = self.constant_input(latent.shape[0]) - out = self.style_conv1(out, latent[:, 0], noise=noise[0]) - skip = self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2], - noise[2::2], self.to_rgbs): - out = conv1(out, latent[:, i], noise=noise1) - - # the conditions may have fewer levels - if i < len(conditions): - # SFT part to combine the conditions - if self.sft_half: # only apply SFT to half of the channels - out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) - out_sft = out_sft * conditions[i - 1] + conditions[i] - out = torch.cat([out_same, out_sft], dim=1) - else: # apply SFT to all the channels - out = out * conditions[i - 1] + conditions[i] - - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space - i += 2 - - image = skip - - if return_latents: - return image, latent - else: - return image, None - - -@ARCH_REGISTRY.register() -class GFPGANBilinear(nn.Module): - """The GFPGAN architecture: 
Unet + StyleGAN2 decoder with SFT. - - It is the bilinear version and it does not use the complicated UpFirDnSmooth function that is not friendly for - deployment. It can be easily converted to the clean version: GFPGANv1Clean. - - - Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior. - - Args: - out_size (int): The spatial size of outputs. - num_style_feat (int): Channel number of style features. Default: 512. - channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. - decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None. - fix_decoder (bool): Whether to fix the decoder. Default: True. - - num_mlp (int): Layer number of MLP style layers. Default: 8. - lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. - input_is_latent (bool): Whether input is latent style. Default: False. - different_w (bool): Whether to use different latent w for different layers. Default: False. - narrow (float): The narrow ratio for channels. Default: 1. - sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. - """ - - def __init__( - self, - out_size, - num_style_feat=512, - channel_multiplier=1, - decoder_load_path=None, - fix_decoder=True, - # for stylegan decoder - num_mlp=8, - lr_mlp=0.01, - input_is_latent=False, - different_w=False, - narrow=1, - sft_half=False): - - super(GFPGANBilinear, self).__init__() - self.input_is_latent = input_is_latent - self.different_w = different_w - self.num_style_feat = num_style_feat - - unet_narrow = narrow * 0.5 # by default, use a half of input channels - channels = { - '4': int(512 * unet_narrow), - '8': int(512 * unet_narrow), - '16': int(512 * unet_narrow), - '32': int(512 * unet_narrow), - '64': int(256 * channel_multiplier * unet_narrow), - '128': int(128 * channel_multiplier * unet_narrow), - '256': int(64 * channel_multiplier * unet_narrow), - '512': int(32 * channel_multiplier * unet_narrow), - '1024': int(16 * channel_multiplier * unet_narrow) - } - - self.log_size = int(math.log(out_size, 2)) - first_out_size = 2**(int(math.log(out_size, 2))) - - self.conv_body_first = ConvLayer(3, channels[f'{first_out_size}'], 1, bias=True, activate=True) - - # downsample - in_channels = channels[f'{first_out_size}'] - self.conv_body_down = nn.ModuleList() - for i in range(self.log_size, 2, -1): - out_channels = channels[f'{2**(i - 1)}'] - self.conv_body_down.append(ResBlock(in_channels, out_channels)) - in_channels = out_channels - - self.final_conv = ConvLayer(in_channels, channels['4'], 3, bias=True, activate=True) - - # upsample - in_channels = channels['4'] - self.conv_body_up = nn.ModuleList() - for i in range(3, self.log_size + 1): - out_channels = channels[f'{2**i}'] - self.conv_body_up.append(ResUpBlock(in_channels, out_channels)) - in_channels = out_channels - - # to RGB - self.toRGB = nn.ModuleList() - for i in range(3, self.log_size + 1): - self.toRGB.append(EqualConv2d(channels[f'{2**i}'], 3, 1, stride=1, padding=0, bias=True, bias_init_val=0)) - - if different_w: - linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat - else: - linear_out_channel = num_style_feat - - self.final_linear = EqualLinear( - channels['4'] * 4 * 4, linear_out_channel, bias=True, bias_init_val=0, lr_mul=1, activation=None) - - # the decoder: stylegan2 generator with SFT modulations - self.stylegan_decoder = StyleGAN2GeneratorBilinearSFT( - out_size=out_size, - num_style_feat=num_style_feat, - 
num_mlp=num_mlp, - channel_multiplier=channel_multiplier, - lr_mlp=lr_mlp, - narrow=narrow, - sft_half=sft_half) - - # load pre-trained stylegan2 model if necessary - if decoder_load_path: - self.stylegan_decoder.load_state_dict( - torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema']) - # fix decoder without updating params - if fix_decoder: - for _, param in self.stylegan_decoder.named_parameters(): - param.requires_grad = False - - # for SFT modulations (scale and shift) - self.condition_scale = nn.ModuleList() - self.condition_shift = nn.ModuleList() - for i in range(3, self.log_size + 1): - out_channels = channels[f'{2**i}'] - if sft_half: - sft_out_channels = out_channels - else: - sft_out_channels = out_channels * 2 - self.condition_scale.append( - nn.Sequential( - EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0), - ScaledLeakyReLU(0.2), - EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=1))) - self.condition_shift.append( - nn.Sequential( - EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0), - ScaledLeakyReLU(0.2), - EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0))) - - def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True): - """Forward function for GFPGANBilinear. - - Args: - x (Tensor): Input images. - return_latents (bool): Whether to return style latents. Default: False. - return_rgb (bool): Whether return intermediate rgb images. Default: True. - randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. - """ - conditions = [] - unet_skips = [] - out_rgbs = [] - - # encoder - feat = self.conv_body_first(x) - for i in range(self.log_size - 2): - feat = self.conv_body_down[i](feat) - unet_skips.insert(0, feat) - - feat = self.final_conv(feat) - - # style code - style_code = self.final_linear(feat.view(feat.size(0), -1)) - if self.different_w: - style_code = style_code.view(style_code.size(0), -1, self.num_style_feat) - - # decode - for i in range(self.log_size - 2): - # add unet skip - feat = feat + unet_skips[i] - # ResUpLayer - feat = self.conv_body_up[i](feat) - # generate scale and shift for SFT layers - scale = self.condition_scale[i](feat) - conditions.append(scale.clone()) - shift = self.condition_shift[i](feat) - conditions.append(shift.clone()) - # generate rgb images - if return_rgb: - out_rgbs.append(self.toRGB[i](feat)) - - # decoder - image, _ = self.stylegan_decoder([style_code], - conditions, - return_latents=return_latents, - input_is_latent=self.input_is_latent, - randomize_noise=randomize_noise) - - return image, out_rgbs diff --git a/spaces/aliabd/SummerTime/dataset/dataset_loaders.py b/spaces/aliabd/SummerTime/dataset/dataset_loaders.py deleted file mode 100644 index f0f1e0637181447dcf76afdc0733009570ad58a9..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/dataset/dataset_loaders.py +++ /dev/null @@ -1,501 +0,0 @@ -from os import path -from tqdm import tqdm -from typing import List, Generator, Optional, Union - -from datasets import Dataset - -from dataset.st_dataset import SummInstance, SummDataset - - -# Set directory to load non_huggingface dataset scripts -FILE_DIRECTORY_PATH = path.dirname(path.realpath(__file__)) -BASE_NONHUGGINGFACE_DATASETS_PATH = path.join( - FILE_DIRECTORY_PATH, "non_huggingface_datasets_builders" -) - - -# Huggingface Datasets - - -class 
CnndmDataset(SummDataset): - """ - The CNN/DM dataset - """ - - dataset_name = "CNN/DailyMail" - - is_query_based = False - is_dialogue_based = False - is_multi_document = False - - huggingface_dataset = True - huggingface_page = "https://huggingface.co/datasets/cnn_dailymail" - - def __init__(self): - super().__init__( - dataset_args=( - "cnn_dailymail", - "3.0.0", - ) - ) - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - article: str = instance["article"] - highlights: str = instance["highlights"] - summ_instance = SummInstance(source=article, summary=highlights) - - yield summ_instance - - -class MultinewsDataset(SummDataset): - """ - The Multi News dataset - """ - - dataset_name = "Multinews" - - is_query_based = False - is_dialogue_based = False - is_multi_document = True - - huggingface_dataset = True - huggingface_page = "https://huggingface.co/datasets/multi_news" - - def __init__(self): - super().__init__(dataset_args=("multi_news",)) - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - document: list = [ - doc for doc in instance["document"].split("|||||") if doc - ] # removes the empty string generated - # since each doc ends with the delimiting token '|||||' - # the final doc creates an empty string - summary: str = instance["summary"] - summ_instance = SummInstance(source=document, summary=summary) - - yield summ_instance - - -class SamsumDataset(SummDataset): - """ - The SAMsum Dataset - """ - - dataset_name = "Samsum" - - is_query_based = False - is_dialogue_based = True - is_multi_document = False - - huggingface_dataset = True - huggingface_page = "https://huggingface.co/datasets/samsum" - - def __init__(self): - super().__init__(dataset_args=("samsum",)) - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - dialogue: List = instance["dialogue"].split( - "\r\n" - ) # split each dialogue into a list of strings such as - # ["speaker1 : utter..", "speaker2 : utter..."] - summary: str = instance["summary"] - summ_instance = SummInstance(source=dialogue, summary=summary) - - yield summ_instance - - -class XsumDataset(SummDataset): - """ - The Xsum Dataset - """ - - dataset_name = "Xsum" - - huggingface_dataset = True - huggingface_page = "https://huggingface.co/datasets/xsum" - - is_query_based = False - is_dialogue_based = False - is_multi_document = False - - def __init__(self): - super().__init__(dataset_args=("xsum",)) - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - 
This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - document: List = instance["document"] - summary: str = instance["summary"] - summ_instance = SummInstance(source=document, summary=summary) - - yield summ_instance - - -class PubmedqaDataset(SummDataset): - """ - The Pubmed QA dataset - """ - - dataset_name = "Pubmedqa" - - is_query_based = True - is_dialogue_based = False - is_multi_document = False - - huggingface_dataset = True - huggingface_page = "https://huggingface.co/datasets/pubmed_qa" - - def __init__(self, seed=None): - super().__init__( - dataset_args=( - "pubmed_qa", - "pqa_artificial", - ) - ) - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - context: str = " ".join(instance["context"]["contexts"]) - answer: str = instance["long_answer"] - query: str = instance["question"] - summ_instance = SummInstance(source=context, summary=answer, query=query) - - yield summ_instance - - -class MlsumDataset(SummDataset): - """ - The MLsum Dataset - A multi-lingual dataset featuring 5 languages - Includes 1.5 million news articles and their corresponding summaries - - "de" - German - "es" - Spanish - "fr" - French - "ru" - Russian - "tu" - Turkish - """ - - dataset_name = "MlSum" - - is_query_based = False - is_dialogue_based = False - is_multi_document = False - - huggingface_dataset = True - huggingface_page = "https://huggingface.co/datasets/mlsum" - supported_languages = ["de", "es", "fr", "ru", "tu"] - - mlsum_instantiation_guide = """The languages supported for the Mlsum Dataset are: - de - German - es - Spanish - fr - French - ru - Russian - tu - Turkish - - Examples to instantiate the dataset: - 1. Dataset with only one language - dataset = MlsumDataset({language_token}) - dataset = MlsumDataset("es") - dataset = MlsumDataset("tu")... - - 2. Dataset with a multiple languages - dataset = MlsumDataset({list of language_token}) - dataset = MlsumDataset(["es","de"]) - dataset = MlsumDataset(["es","de", "tu"])... - - 3. 
Dataset with all supported languages (default)
-        dataset = MlsumDataset(all)
-        dataset = MlsumDataset()
-    """
-
-    def __init__(self, languages: Optional[Union[str, List[str]]] = "all"):
-        super().__init__(dataset_args=(languages,))
-
-    def _load_dataset_safe(self, languages: Optional[Union[str, List[str]]]):
-        """
-        Overrides the parent class method
-        Method loads multiple datasets of different languages provided in :param languages:
-        It then concatenates these datasets into one combined dataset
-        :rtype: datasetDict containing the combined dataset
-        :param languages: Optional, either a string or list of strings specifying the languages
-            to load
-        """
-        print(MlsumDataset.mlsum_instantiation_guide)
-
-        # Choose languages to download articles
-        if languages == "all":
-            selected_languages = MlsumDataset.supported_languages
-        elif isinstance(languages, list):
-            for language in languages:
-                assert self.is_supported(language)
-            selected_languages = languages
-        else:
-            assert self.is_supported(languages)
-            selected_languages = [languages]
-
-        # Concatenate selected languages into one dataset
-        language_datasets = []
-        for language in selected_languages:
-            dataset = super()._load_dataset_safe(
-                "mlsum",
-                language,
-            )
-
-            language_datasets.append(dataset)
-
-        mlsum_dataset = self._concatenate_dataset_dicts(language_datasets)
-
-        return mlsum_dataset
-
-    def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]:
-        """
-        Overrides the SummDataset '_process_data()' method
-        This method processes the data contained in the dataset
-        and puts each data instance into a SummInstance object
-        :param dataset: a train/validation/test dataset
-        :rtype: a generator yielding SummInstance objects
-        """
-        for instance in tqdm(data):
-            article: List = instance["text"]
-            summary: str = instance["summary"]
-            summ_instance = SummInstance(source=article, summary=summary)
-
-            yield summ_instance
-
-    def is_supported(self, language: str):
-        """
-        Checks whether the requested language is supported
-        :param language: string containing the requested language
-        :rtype bool:
-        """
-        if language not in MlsumDataset.supported_languages:
-            print(MlsumDataset.mlsum_instantiation_guide)
-            raise ValueError(
-                f"The language(s): '{language}' entered is not supported. See above message for usage info"
-            )
-        else:
-            return True
-
-
-# Non-huggingface datasets
-
-
-class ScisummnetDataset(SummDataset):
-    """
-    The SciSummNet dataset. As a dataset not included by huggingface, we need to manually download it and set basic
-    information for the dataset
-    """
-
-    dataset_name = "ScisummNet"
-
-    version = "1.1.0"
-    description = (
-        "A summary of scientific papers should ideally incorporate the impact of the papers on the "
-        "research community reflected by citations. To facilitate research in citation-aware scientific "
-        "paper summarization (Scisumm), the CL-Scisumm shared task has been organized since 2014 for "
-        "papers in the computational linguistics and NLP domain."
- ) - - is_dialogue_based = False - is_multi_document = False - is_query_based = False - - huggingface_dataset = False - builder_script_path = path.join( - BASE_NONHUGGINGFACE_DATASETS_PATH, dataset_name.lower() + ".py" - ) - - def __init__(self, seed=None): - super().__init__() - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - docs: List = [ - instance["document_xml"], - instance["citing_sentences_annotated.json"], - ] - summary: str = instance["summary"] - summ_instance = SummInstance(source=docs, summary=summary) - - yield summ_instance - - -class SummscreenDataset(SummDataset): - """ - The SummScreen dataset. As a dataset not included by huggingface, we need to do manually download, set basic - information for the dataset - """ - - dataset_name = "Summscreen" - - version = "1.1.0" - is_dialogue_based = True - is_multi_document = False - is_query_based = False - - huggingface_dataset = False - builder_script_path = path.join( - BASE_NONHUGGINGFACE_DATASETS_PATH, dataset_name.lower() + ".py" - ) - - def __init__(self, seed=None): - super().__init__() - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - transcript: List = instance[ - "transcript" - ] # convert string into a list of string dialogues - recap: str = instance["recap"] - summ_instance = SummInstance(source=transcript, summary=recap) - - yield summ_instance - - -class QMsumDataset(SummDataset): - """ - QMSum Dataset - """ - - dataset_name = "QMsum" - description = """ - QMSum is a new human-annotated benchmark for query-based multi-domain meeting summarization task, - which consists of 1,808 query-summary pairs over 232 meetings in multiple domains. 
- """ - - is_dialogue_based = True - is_multi_document = False - is_query_based = True - - huggingface_dataset = False - builder_script_path = path.join( - BASE_NONHUGGINGFACE_DATASETS_PATH, dataset_name.lower() + ".py" - ) - - def __init__(self): - super().__init__() - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - for query_set in ( - instance["general_query_list"] + instance["specific_query_list"] - ): - meeting: List = [ - utterance["speaker"] + " : " + utterance["content"] - for utterance in instance["meeting_transcripts"] - ] - query: str = query_set["query"] - summary: str = query_set["answer"] - summ_instance = SummInstance( - source=meeting, summary=summary, query=query - ) - - yield summ_instance - - -class ArxivDataset(SummDataset): - """ - The Arxiv Dataset - """ - - dataset_name = "Arxiv_longsummarization" - description = """ - A summarization dataset comprised of pairs of scientific papers. - The dataset provides a challenging testbed for abstractive summarization. - It contains papers and their abstracts. - """ - - is_dialogue_based = False - is_multi_document = False - is_query_based = False - - huggingface_dataset = False - builder_script_path = path.join( - BASE_NONHUGGINGFACE_DATASETS_PATH, dataset_name.lower() + ".py" - ) - - def __init__(self): - - print( - "*****************", - "***Attention***", - "This dataset is quite large (approx 5Gb and will need about 15 Gb for the extraction process", - "Cancel/interrupt the download if size and time constraints will not be met", - "*****************", - sep="\n", - ) - - super().__init__() - - def _process_data(self, data: Dataset) -> Generator[SummInstance, None, None]: - """ - Overrides the SummDataset '_process_data()' method - This method processes the data contained in the dataset - and puts each data instance into a SummInstance object - :param dataset: a train/validation/test dataset - :rtype: a generator yielding SummInstance objects - """ - for instance in tqdm(data): - article: List = instance["article_text"] - abstract: str = " ".join(instance["abstract_text"]) - summ_instance = SummInstance(source=article, summary=abstract) - - yield summ_instance diff --git a/spaces/almakedon/faster-whisper-webui/src/prompts/jsonPromptStrategy.py b/spaces/almakedon/faster-whisper-webui/src/prompts/jsonPromptStrategy.py deleted file mode 100644 index 25aa938adc3c0d5776cd11e0d123195bb6e69aeb..0000000000000000000000000000000000000000 --- a/spaces/almakedon/faster-whisper-webui/src/prompts/jsonPromptStrategy.py +++ /dev/null @@ -1,49 +0,0 @@ -import json -from typing import Dict -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - - -class JsonPromptSegment(): - def __init__(self, segment_index: int, prompt: str, format_prompt: bool = False): - self.prompt = prompt - self.segment_index = segment_index - self.format_prompt = format_prompt - -class JsonPromptStrategy(AbstractPromptStrategy): - def __init__(self, initial_json_prompt: str): - """ - Parameters - ---------- - initial_json_prompt: str - The initial prompts for each segment in JSON form. 
- - Format: - [ - {"segment_index": 0, "prompt": "Hello, how are you?"}, - {"segment_index": 1, "prompt": "I'm doing well, how are you?"}, - {"segment_index": 2, "prompt": "{0} Fine, thank you.", "format_prompt": true} - ] - - """ - parsed_json = json.loads(initial_json_prompt) - self.segment_lookup: Dict[str, JsonPromptSegment] = dict() - - for prompt_entry in parsed_json: - segment_index = prompt_entry["segment_index"] - prompt = prompt_entry["prompt"] - format_prompt = prompt_entry.get("format_prompt", False) - self.segment_lookup[str(segment_index)] = JsonPromptSegment(segment_index, prompt, format_prompt) - - def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str: - # Lookup prompt - prompt = self.segment_lookup.get(str(segment_index), None) - - if (prompt is None): - # No prompt found, return whisper prompt - print(f"Could not find prompt for segment {segment_index}, returning whisper prompt") - return whisper_prompt - - if (prompt.format_prompt): - return prompt.prompt.format(whisper_prompt) - else: - return self._concat_prompt(prompt.prompt, whisper_prompt) diff --git a/spaces/alphunt/diffdock-alphunt-demo/esm/esm/modules.py b/spaces/alphunt/diffdock-alphunt-demo/esm/esm/modules.py deleted file mode 100644 index dc7b1ae2ef4caa1f42dc400ed9a7fcc33ca348ad..0000000000000000000000000000000000000000 --- a/spaces/alphunt/diffdock-alphunt-demo/esm/esm/modules.py +++ /dev/null @@ -1,418 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -from typing import Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F - -from .multihead_attention import MultiheadAttention # noqa -from .axial_attention import ColumnSelfAttention, RowSelfAttention - - -def gelu(x): - """Implementation of the gelu activation function. - - For information: OpenAI GPT's gelu is slightly different - (and gives slightly different results): - 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) - """ - return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) - - -def symmetrize(x): - "Make layer symmetric in final two dimensions, used for contact prediction." - return x + x.transpose(-1, -2) - - -def apc(x): - "Perform average product correct, used for contact prediction." 
- a1 = x.sum(-1, keepdims=True) - a2 = x.sum(-2, keepdims=True) - a12 = x.sum((-1, -2), keepdims=True) - - avg = a1 * a2 - avg.div_(a12) # in-place to reduce memory - normalized = x - avg - return normalized - - -class ESM1LayerNorm(nn.Module): - def __init__(self, hidden_size, eps=1e-12, affine=True): - """Construct a layernorm layer in the TF style (eps inside the sqrt).""" - super().__init__() - self.hidden_size = (hidden_size,) if isinstance(hidden_size, int) else tuple(hidden_size) - self.eps = eps - self.affine = bool(affine) - if self.affine: - self.weight = nn.Parameter(torch.ones(hidden_size)) - self.bias = nn.Parameter(torch.zeros(hidden_size)) - else: - self.weight, self.bias = None, None - - def forward(self, x): - dims = tuple(-(i + 1) for i in range(len(self.hidden_size))) - means = x.mean(dims, keepdim=True) - x_zeromean = x - means - variances = x_zeromean.pow(2).mean(dims, keepdim=True) - x = x_zeromean / torch.sqrt(variances + self.eps) - if self.affine: - x = (self.weight * x) + self.bias - return x - - -try: - from apex.normalization import FusedLayerNorm as _FusedLayerNorm - - class ESM1bLayerNorm(_FusedLayerNorm): - @torch.jit.unused - def forward(self, x): - if not x.is_cuda: - return super().forward(x) - else: - with torch.cuda.device(x.device): - return super().forward(x) - -except ImportError: - from torch.nn import LayerNorm as ESM1bLayerNorm - - -class TransformerLayer(nn.Module): - """Transformer layer block.""" - - def __init__( - self, - embed_dim, - ffn_embed_dim, - attention_heads, - add_bias_kv=True, - use_esm1b_layer_norm=False, - use_rotary_embeddings: bool = False, - ): - super().__init__() - self.embed_dim = embed_dim - self.ffn_embed_dim = ffn_embed_dim - self.attention_heads = attention_heads - self.use_rotary_embeddings = use_rotary_embeddings - self._init_submodules(add_bias_kv, use_esm1b_layer_norm) - - def _init_submodules(self, add_bias_kv, use_esm1b_layer_norm): - BertLayerNorm = ESM1bLayerNorm if use_esm1b_layer_norm else ESM1LayerNorm - - self.self_attn = MultiheadAttention( - self.embed_dim, - self.attention_heads, - add_bias_kv=add_bias_kv, - add_zero_attn=False, - use_rotary_embeddings=self.use_rotary_embeddings, - ) - self.self_attn_layer_norm = BertLayerNorm(self.embed_dim) - - self.fc1 = nn.Linear(self.embed_dim, self.ffn_embed_dim) - self.fc2 = nn.Linear(self.ffn_embed_dim, self.embed_dim) - - self.final_layer_norm = BertLayerNorm(self.embed_dim) - - def forward( - self, x, self_attn_mask=None, self_attn_padding_mask=None, need_head_weights=False - ): - residual = x - x = self.self_attn_layer_norm(x) - x, attn = self.self_attn( - query=x, - key=x, - value=x, - key_padding_mask=self_attn_padding_mask, - need_weights=True, - need_head_weights=need_head_weights, - attn_mask=self_attn_mask, - ) - x = residual + x - - residual = x - x = self.final_layer_norm(x) - x = gelu(self.fc1(x)) - x = self.fc2(x) - x = residual + x - - return x, attn - - -class AxialTransformerLayer(nn.Module): - """Implements an Axial MSA Transformer block.""" - - def __init__( - self, - embedding_dim: int = 768, - ffn_embedding_dim: int = 3072, - num_attention_heads: int = 8, - dropout: float = 0.1, - attention_dropout: float = 0.1, - activation_dropout: float = 0.1, - max_tokens_per_msa: int = 2**14, - ) -> None: - super().__init__() - - # Initialize parameters - self.embedding_dim = embedding_dim - self.dropout_prob = dropout - - row_self_attention = RowSelfAttention( - embedding_dim, - num_attention_heads, - dropout=dropout, - 
max_tokens_per_msa=max_tokens_per_msa, - ) - - column_self_attention = ColumnSelfAttention( - embedding_dim, - num_attention_heads, - dropout=dropout, - max_tokens_per_msa=max_tokens_per_msa, - ) - - feed_forward_layer = FeedForwardNetwork( - embedding_dim, - ffn_embedding_dim, - activation_dropout=activation_dropout, - max_tokens_per_msa=max_tokens_per_msa, - ) - - self.row_self_attention = self.build_residual(row_self_attention) - self.column_self_attention = self.build_residual(column_self_attention) - self.feed_forward_layer = self.build_residual(feed_forward_layer) - - def build_residual(self, layer: nn.Module): - return NormalizedResidualBlock( - layer, - self.embedding_dim, - self.dropout_prob, - ) - - def forward( - self, - x: torch.Tensor, - self_attn_mask: Optional[torch.Tensor] = None, - self_attn_padding_mask: Optional[torch.Tensor] = None, - need_head_weights: bool = False, - ): - """ - LayerNorm is applied either before or after the self-attention/ffn - modules similar to the original Transformer implementation. - """ - x, row_attn = self.row_self_attention( - x, - self_attn_mask=self_attn_mask, - self_attn_padding_mask=self_attn_padding_mask, - ) - x, column_attn = self.column_self_attention( - x, - self_attn_mask=self_attn_mask, - self_attn_padding_mask=self_attn_padding_mask, - ) - x = self.feed_forward_layer(x) - if need_head_weights: - return x, column_attn, row_attn - else: - return x - - -class LearnedPositionalEmbedding(nn.Embedding): - """ - This module learns positional embeddings up to a fixed maximum size. - Padding ids are ignored by either offsetting based on padding_idx - or by setting padding_idx to None and ensuring that the appropriate - position ids are passed to the forward function. - """ - - def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): - if padding_idx is not None: - num_embeddings_ = num_embeddings + padding_idx + 1 - else: - num_embeddings_ = num_embeddings - super().__init__(num_embeddings_, embedding_dim, padding_idx) - self.max_positions = num_embeddings - - def forward(self, input: torch.Tensor): - """Input is expected to be of size [bsz x seqlen].""" - if input.size(1) > self.max_positions: - raise ValueError( - f"Sequence length {input.size(1)} above maximum " - f" sequence length of {self.max_positions}" - ) - mask = input.ne(self.padding_idx).int() - positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx - return F.embedding( - positions, - self.weight, - self.padding_idx, - self.max_norm, - self.norm_type, - self.scale_grad_by_freq, - self.sparse, - ) - - -class SinusoidalPositionalEmbedding(nn.Module): - def __init__(self, embed_dim, padding_idx, learned=False): - super().__init__() - self.embed_dim = embed_dim - self.padding_idx = padding_idx - self.register_buffer("_float_tensor", torch.FloatTensor(1)) - self.weights = None - - def forward(self, x): - bsz, seq_len = x.shape - max_pos = self.padding_idx + 1 + seq_len - if self.weights is None or max_pos > self.weights.size(0): - self.weights = self.get_embedding(max_pos) - self.weights = self.weights.type_as(self._float_tensor) - - positions = self.make_positions(x) - return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach() - - def make_positions(self, x): - mask = x.ne(self.padding_idx) - range_buf = torch.arange(x.size(1), device=x.device).expand_as(x) + self.padding_idx + 1 - positions = range_buf.expand_as(x) - return positions * mask.long() + self.padding_idx * (1 - mask.long()) - - def 
get_embedding(self, num_embeddings): - half_dim = self.embed_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) - emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) - if self.embed_dim % 2 == 1: - # zero pad - emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) - if self.padding_idx is not None: - emb[self.padding_idx, :] = 0 - return emb - - -class RobertaLMHead(nn.Module): - """Head for masked language modeling.""" - - def __init__(self, embed_dim, output_dim, weight): - super().__init__() - self.dense = nn.Linear(embed_dim, embed_dim) - self.layer_norm = ESM1bLayerNorm(embed_dim) - self.weight = weight - self.bias = nn.Parameter(torch.zeros(output_dim)) - - def forward(self, features): - x = self.dense(features) - x = gelu(x) - x = self.layer_norm(x) - # project back to size of vocabulary with bias - x = F.linear(x, self.weight) + self.bias - return x - - -class ContactPredictionHead(nn.Module): - """Performs symmetrization, apc, and computes a logistic regression on the output features""" - - def __init__( - self, - in_features: int, - prepend_bos: bool, - append_eos: bool, - bias=True, - eos_idx: Optional[int] = None, - ): - super().__init__() - self.in_features = in_features - self.prepend_bos = prepend_bos - self.append_eos = append_eos - if append_eos and eos_idx is None: - raise ValueError("Using an alphabet with eos token, but no eos token was passed in.") - self.eos_idx = eos_idx - self.regression = nn.Linear(in_features, 1, bias) - self.activation = nn.Sigmoid() - - def forward(self, tokens, attentions): - # remove eos token attentions - if self.append_eos: - eos_mask = tokens.ne(self.eos_idx).to(attentions) - eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2) - attentions = attentions * eos_mask[:, None, None, :, :] - attentions = attentions[..., :-1, :-1] - # remove cls token attentions - if self.prepend_bos: - attentions = attentions[..., 1:, 1:] - batch_size, layers, heads, seqlen, _ = attentions.size() - attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen) - - # features: B x C x T x T - attentions = attentions.to( - self.regression.weight.device - ) # attentions always float32, may need to convert to float16 - attentions = apc(symmetrize(attentions)) - attentions = attentions.permute(0, 2, 3, 1) - return self.activation(self.regression(attentions).squeeze(3)) - - -class NormalizedResidualBlock(nn.Module): - def __init__( - self, - layer: nn.Module, - embedding_dim: int, - dropout: float = 0.1, - ): - super().__init__() - self.embedding_dim = embedding_dim - - self.layer = layer - self.dropout_module = nn.Dropout( - dropout, - ) - self.layer_norm = ESM1bLayerNorm(self.embedding_dim) - - def forward(self, x, *args, **kwargs): - residual = x - x = self.layer_norm(x) - outputs = self.layer(x, *args, **kwargs) - if isinstance(outputs, tuple): - x, *out = outputs - else: - x = outputs - out = None - - x = self.dropout_module(x) - x = residual + x - - if out is not None: - return (x,) + tuple(out) - else: - return x - - -class FeedForwardNetwork(nn.Module): - def __init__( - self, - embedding_dim: int, - ffn_embedding_dim: int, - activation_dropout: float = 0.1, - max_tokens_per_msa: int = 2**14, - ): - super().__init__() - self.embedding_dim = embedding_dim - self.ffn_embedding_dim = ffn_embedding_dim - self.max_tokens_per_msa = max_tokens_per_msa - 
self.activation_fn = nn.GELU() - self.activation_dropout_module = nn.Dropout( - activation_dropout, - ) - self.fc1 = nn.Linear(embedding_dim, ffn_embedding_dim) - self.fc2 = nn.Linear(ffn_embedding_dim, embedding_dim) - - def forward(self, x): - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - x = self.fc2(x) - return x diff --git a/spaces/amirDev/crowd-counting-p2p/models/matcher.py b/spaces/amirDev/crowd-counting-p2p/models/matcher.py deleted file mode 100644 index 85b86fdf8e1fb8560a291195169f1d3f14d6cd97..0000000000000000000000000000000000000000 --- a/spaces/amirDev/crowd-counting-p2p/models/matcher.py +++ /dev/null @@ -1,83 +0,0 @@ - -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Mostly copy-paste from DETR (https://github.com/facebookresearch/detr). -""" -import torch -from scipy.optimize import linear_sum_assignment -from torch import nn - - -class HungarianMatcher_Crowd(nn.Module): - """This class computes an assignment between the targets and the predictions of the network - - For efficiency reasons, the targets don't include the no_object. Because of this, in general, - there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, - while the others are un-matched (and thus treated as non-objects). - """ - - def __init__(self, cost_class: float = 1, cost_point: float = 1): - """Creates the matcher - - Params: - cost_class: This is the relative weight of the foreground object - cost_point: This is the relative weight of the L1 error of the points coordinates in the matching cost - """ - super().__init__() - self.cost_class = cost_class - self.cost_point = cost_point - assert cost_class != 0 or cost_point != 0, "all costs cant be 0" - - @torch.no_grad() - def forward(self, outputs, targets): - """ Performs the matching - - Params: - outputs: This is a dict that contains at least these entries: - "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits - "points": Tensor of dim [batch_size, num_queries, 2] with the predicted point coordinates - - targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: - "labels": Tensor of dim [num_target_points] (where num_target_points is the number of ground-truth - objects in the target) containing the class labels - "points": Tensor of dim [num_target_points, 2] containing the target point coordinates - - Returns: - A list of size batch_size, containing tuples of (index_i, index_j) where: - - index_i is the indices of the selected predictions (in order) - - index_j is the indices of the corresponding selected targets (in order) - For each batch element, it holds: - len(index_i) = len(index_j) = min(num_queries, num_target_points) - """ - bs, num_queries = outputs["pred_logits"].shape[:2] - - # We flatten to compute the cost matrices in a batch - out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] - out_points = outputs["pred_points"].flatten(0, 1) # [batch_size * num_queries, 2] - - # Also concat the target labels and points - # tgt_ids = torch.cat([v["labels"] for v in targets]) - tgt_ids = torch.cat([v["labels"] for v in targets]) - tgt_points = torch.cat([v["point"] for v in targets]) - - # Compute the classification cost. Contrary to the loss, we don't use the NLL, - # but approximate it in 1 - proba[target class]. - # The 1 is a constant that doesn't change the matching, it can be ommitted. 
- cost_class = -out_prob[:, tgt_ids] - - # Compute the L2 cost between point - cost_point = torch.cdist(out_points, tgt_points, p=2) - - # Compute the giou cost between point - - # Final cost matrix - C = self.cost_point * cost_point + self.cost_class * cost_class - C = C.view(bs, num_queries, -1).cpu() - - sizes = [len(v["point"]) for v in targets] - indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] - return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] - - -def build_matcher_crowd(args): - return HungarianMatcher_Crowd(cost_class=args.set_cost_class, cost_point=args.set_cost_point) diff --git a/spaces/anaclaudia13ct/insect_detection/utils/segment/__init__.py b/spaces/anaclaudia13ct/insect_detection/utils/segment/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/architext/Architext_deployed/app.py b/spaces/architext/Architext_deployed/app.py deleted file mode 100644 index aa31059252e7642fc0aadb540afc728a36b17535..0000000000000000000000000000000000000000 --- a/spaces/architext/Architext_deployed/app.py +++ /dev/null @@ -1,267 +0,0 @@ -from pathlib import Path -from num2words import num2words -import numpy as np -import os -import random -import re -import torch -import json -from shapely.geometry.polygon import Polygon -from shapely.affinity import scale -from PIL import Image, ImageDraw, ImageOps, ImageFilter, ImageFont, ImageColor - -os.system('pip3 install gradio==2.7.5') -import gradio as gr - -from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM - -finetuned = AutoModelForCausalLM.from_pretrained('model') -tokenizer = AutoTokenizer.from_pretrained('gpt2') - -device = "cuda:0" if torch.cuda.is_available() else "cpu" -print(device) -finetuned = finetuned.to(device) - -# Utility functions - -def containsNumber(value): - for character in value: - if character.isdigit(): - return True - return False - -def creativity(intensity): - if(intensity == 'Low'): - top_p = 0.95 - top_k = 10 - elif(intensity == 'Medium'): - top_p = 0.9 - top_k = 50 - if(intensity == 'High'): - top_p = 0.85 - top_k = 100 - return top_p, top_k - -housegan_labels = {"living_room": 1, "kitchen": 2, "bedroom": 3, "bathroom": 4, "missing": 5, "closet": 6, - "balcony": 7, "corridor": 8, "dining_room": 9, "laundry_room": 10} - -architext_colors = [[0, 0, 0], [249, 222, 182], [195, 209, 217], [250, 120, 128], [126, 202, 234], [190, 0, 198], [255, 255, 255], - [6, 53, 17], [17, 33, 58], [132, 151, 246], [197, 203, 159], [6, 53, 17],] - -regex = re.compile(".*?\((.*?)\)") - -def draw_polygons(polygons, colors, im_size=(512, 512), b_color="white", fpath=None): - image = Image.new("RGBA", im_size, color="white") - draw = ImageDraw.Draw(image) - for poly, color, in zip(polygons, colors): - #get initial polygon coordinates - xy = poly.exterior.xy - coords = np.dstack((xy[1], xy[0])).flatten() - # draw it on canvas, with the appropriate colors - draw.polygon(list(coords), fill=(0, 0, 0)) - #get inner polygon coordinates - small_poly = poly.buffer(-1, resolution=32, cap_style=2, join_style=2, mitre_limit=5.0) - if small_poly.geom_type == 'MultiPolygon': - mycoordslist = [list(x.exterior.coords) for x in small_poly] - for coord in mycoordslist: - coords = np.dstack((np.array(coord)[:,1], np.array(coord)[:, 0])).flatten() - draw.polygon(list(coords), fill=tuple(color)) - elif poly.geom_type == 'Polygon': - #get inner polygon 
coordinates - xy2 = small_poly.exterior.xy - coords2 = np.dstack((xy2[1], xy2[0])).flatten() - # draw it on canvas, with the appropriate colors - draw.polygon(list(coords2), fill=tuple(color)) - image = image.transpose(Image.FLIP_TOP_BOTTOM) - if(fpath): - image.save(fpath, quality=100, subsampling=0) - return draw, image - -def prompt_to_layout(user_prompt, intensity, fpath=None): - if(containsNumber(user_prompt) == True): - spaced_prompt = user_prompt.split(' ') - new_prompt = ' '.join([word if word.isdigit() == False else num2words(int(word)).lower() for word in spaced_prompt]) - model_prompt = '[User prompt] {} [Layout]'.format(new_prompt) - top_p, top_k = creativity(intensity) - model_prompt = '[User prompt] {} [Layout]'.format(user_prompt) - input_ids = tokenizer(model_prompt, return_tensors='pt').to(device) - output = finetuned.generate(**input_ids, do_sample=True, top_p=top_p, top_k=top_k, - eos_token_id=50256, max_length=400) - output = tokenizer.batch_decode(output, skip_special_tokens=True) - layout = output[0].split('[User prompt]')[1].split('[Layout] ')[1].split(', ') - spaces = [txt.split(':')[0] for txt in layout] - coords = [txt.split(':')[1].rstrip() for txt in layout] - coordinates = [re.findall(regex, coord) for coord in coords] - - num_coords = [] - for coord in coordinates: - temp = [] - for xy in coord: - numbers = xy.split(',') - temp.append(tuple([int(num)/14.2 for num in numbers])) - num_coords.append(temp) - - new_spaces = [] - for i, v in enumerate(spaces): - totalcount = spaces.count(v) - count = spaces[:i].count(v) - new_spaces.append(v + str(count + 1) if totalcount > 1 else v) - - out_dict = dict(zip(new_spaces, num_coords)) - out_dict = json.dumps(out_dict) - - polygons = [] - for coord in coordinates: - polygons.append([point.split(',') for point in coord]) - geom = [] - for poly in polygons: - scaled_poly = scale(Polygon(np.array(poly, dtype=int)), xfact=2, yfact=2, origin=(0,0)) - geom.append(scaled_poly) - colors = [architext_colors[housegan_labels[space]] for space in spaces] - _, im = draw_polygons(geom, colors, fpath=fpath) - html = '' - legend = Image.open("labels.png") - imgs_comb = np.vstack([im, legend]) - imgs_comb = Image.fromarray(imgs_comb) - return imgs_comb, out_dict - - -# Gradio App - -custom_css=""" -@import url("https://use.typekit.net/nid3pfr.css"); -.gradio_wrapper .gradio_bg[is_embedded=false] { - min-height: 80%; -} - -.gradio_wrapper .gradio_bg[is_embedded=false] .gradio_page { - display: flex; - width: 100vw; - min-height: 50vh; - flex-direction: column; - justify-content: center; - align-items: center; - margin: 0px; - max-width: 100vw; - background: #FFFFFF; -} - -.gradio_wrapper .gradio_bg[is_embedded=false] .gradio_page .content { - padding: 0px; - margin: 0px; -} - -.gradio_interface { - width: 100vw; - max-width: 1500px; -} - -.gradio_interface .panel:nth-child(2) .component:nth-child(3) { - display:none -} - -.gradio_wrapper .gradio_bg[theme=default] .panel_buttons { - justify-content: flex-end; -} - -.gradio_wrapper .gradio_bg[theme=default] .panel_button { - flex: 0 0 0; - min-width: 150px; -} - -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .panel_button.submit { - background: #11213A; - border-radius: 5px; - color: #FFFFFF; - text-transform: uppercase; - min-width: 150px; - height: 4em; - letter-spacing: 0.15em; - flex: 0 0 0; -} -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .panel_button.submit:hover { - background: #000000; -} - -.input_text:focus { - border-color: #FA7880; -} 
-.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .input_text input, -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .input_text textarea { - font: 200 45px garamond-premier-pro-display, serif; - line-height: 110%; - color: #11213A; - border-radius: 5px; - padding: 15px; - border: none; - background: #F2F4F4; -} -.input_text textarea:focus-visible { - outline: none; -} -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .input_radio .radio_item.selected { - background-color: #11213A; -} -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .input_radio .selected .radio_circle { - border-color: #4365c4; -} -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .output_image { - width: 100%; - height: 40vw; - max-height: 630px; -} -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .output_image .image_preview_holder { - background: transparent; -} -.panel:nth-child(1) { - margin-left: 50px; - margin-right: 50px; - margin-bottom: 80px; - max-width: 750px; -} -.panel { - background: transparent; -} -.gradio_wrapper .gradio_bg[theme=default] .gradio_interface .component_set { - background: transparent; - box-shadow: none; -} -.panel:nth-child(2) .gradio_wrapper .gradio_bg[theme=default] .gradio_interface .panel_header { - display: none; -} - -.gradio_wrapper .gradio_bg[is_embedded=false] .gradio_page .footer { - transform: scale(0.75); - filter: grayscale(1); -} - -.labels { - height: 20px; - width: auto; -} - -@media (max-width: 1000px){ - .panel:nth-child(1) { - margin-left: 0px; - margin-right: 0px; - } - .gradio_wrapper .gradio_bg[theme=default] .gradio_interface .output_image { - height: auto; - } -} -""" -creative_slider = gr.inputs.Radio(["Low", "Medium", "High"], default="Low", label='Creativity') -textbox = gr.inputs.Textbox(placeholder='An apartment with two bedrooms and one bathroom', lines="3", - label="DESCRIBE YOUR IDEAL APARTMENT") -generated = gr.outputs.Image(label='Generated Layout') -layout = gr.outputs.Textbox(label='Layout Coordinates') - -iface = gr.Interface(fn=prompt_to_layout, inputs=[textbox, creative_slider], - outputs=[generated, layout], - css=custom_css, - theme="default", - allow_flagging='never', - allow_screenshot=False, - thumbnail="thumbnail_gradio.PNG") - -iface.launch(enable_queue=True, share=True) \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/xtts.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/xtts.py deleted file mode 100644 index 7cc9836a6795047e0ceb273c807dba243006a11e..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/models/xtts.py +++ /dev/null @@ -1,740 +0,0 @@ -import os -from contextlib import contextmanager -from dataclasses import dataclass - -import librosa -import torch -import torch.nn.functional as F -import torchaudio -from coqpit import Coqpit - -from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, wav_to_univnet_mel -from TTS.tts.layers.xtts.gpt import GPT -from TTS.tts.layers.xtts.hifigan_decoder import HifiDecoder -from TTS.tts.layers.xtts.stream_generator import init_stream_support -from TTS.tts.layers.xtts.tokenizer import VoiceBpeTokenizer -from TTS.tts.models.base_tts import BaseTTS -from TTS.utils.io import load_fsspec - -init_stream_support() - - -def wav_to_mel_cloning( - wav, - mel_norms_file="../experiments/clips_mel_norms.pth", - mel_norms=None, - device=torch.device("cpu"), - n_fft=4096, - hop_length=1024, - win_length=4096, - power=2, - 
normalized=False, - sample_rate=22050, - f_min=0, - f_max=8000, - n_mels=80, -): - """ - Convert waveform to mel-spectrogram with hard-coded parameters for cloning. - - Args: - wav (torch.Tensor): Input waveform tensor. - mel_norms_file (str): Path to mel-spectrogram normalization file. - mel_norms (torch.Tensor): Mel-spectrogram normalization tensor. - device (torch.device): Device to use for computation. - - Returns: - torch.Tensor: Mel-spectrogram tensor. - """ - mel_stft = torchaudio.transforms.MelSpectrogram( - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - power=power, - normalized=normalized, - sample_rate=sample_rate, - f_min=f_min, - f_max=f_max, - n_mels=n_mels, - norm="slaney", - ).to(device) - wav = wav.to(device) - mel = mel_stft(wav) - mel = torch.log(torch.clamp(mel, min=1e-5)) - if mel_norms is None: - mel_norms = torch.load(mel_norms_file, map_location=device) - mel = mel / mel_norms.unsqueeze(0).unsqueeze(-1) - return mel - - -def load_audio(audiopath, sampling_rate): - # better load setting following: https://github.com/faroit/python_audio_loading_benchmark - if audiopath[-4:] == ".mp3": - # it uses torchaudio with sox backend to load mp3 - audio, lsr = torchaudio.backend.sox_io_backend.load(audiopath) - else: - # it uses torchaudio soundfile backend to load all the others data type - audio, lsr = torchaudio.backend.soundfile_backend.load(audiopath) - - # stereo to mono if needed - if audio.size(0) != 1: - audio = torch.mean(audio, dim=0, keepdim=True) - - if lsr != sampling_rate: - audio = torchaudio.functional.resample(audio, lsr, sampling_rate) - - # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk. - # '10' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds. - if torch.any(audio > 10) or not torch.any(audio < 0): - print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}") - # clip audio invalid values - audio.clip_(-1, 1) - return audio - - -def pad_or_truncate(t, length): - """ - Ensure a given tensor t has a specified sequence length by either padding it with zeros or clipping it. - - Args: - t (torch.Tensor): The input tensor to be padded or truncated. - length (int): The desired length of the tensor. - - Returns: - torch.Tensor: The padded or truncated tensor. - """ - tp = t[..., :length] - if t.shape[-1] == length: - tp = t - elif t.shape[-1] < length: - tp = F.pad(t, (0, length - t.shape[-1])) - return tp - - -@dataclass -class XttsAudioConfig(Coqpit): - """ - Configuration class for audio-related parameters in the XTTS model. - - Args: - sample_rate (int): The sample rate in which the GPT operates. - output_sample_rate (int): The sample rate of the output audio waveform. - """ - - sample_rate: int = 22050 - output_sample_rate: int = 24000 - - -@dataclass -class XttsArgs(Coqpit): - """A dataclass to represent XTTS model arguments that define the model structure. - - Args: - gpt_batch_size (int): The size of the auto-regressive batch. - enable_redaction (bool, optional): Whether to enable redaction. Defaults to True. - kv_cache (bool, optional): Whether to use the kv_cache. Defaults to True. - gpt_checkpoint (str, optional): The checkpoint for the autoregressive model. Defaults to None. - clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None. - decoder_checkpoint (str, optional): The checkpoint for the DiffTTS model. 
Defaults to None. - num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. - - For GPT model: - gpt_max_audio_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. - gpt_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402. - gpt_max_prompt_tokens (int, optional): The maximum prompt tokens or the autoregressive model. Defaults to 70. - gpt_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30. - gpt_n_model_channels (int, optional): The model dimension for the autoregressive model. Defaults to 1024. - gpt_n_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16. - gpt_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255. - gpt_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255. - gpt_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False. - gpt_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False. - gpt_code_stride_len (int, optional): The hop_size of dvae and consequently of the gpt output. Defaults to 1024. - gpt_use_masking_gt_prompt_approach (bool, optional): If True, it will use ground truth as prompt and it will mask the loss to avoid repetition. Defaults to True. - gpt_use_perceiver_resampler (bool, optional): If True, it will use perceiver resampler from flamingo paper - https://arxiv.org/abs/2204.14198. Defaults to False. - """ - - gpt_batch_size: int = 1 - enable_redaction: bool = False - kv_cache: bool = True - gpt_checkpoint: str = None - clvp_checkpoint: str = None - decoder_checkpoint: str = None - num_chars: int = 255 - - # XTTS GPT Encoder params - tokenizer_file: str = "" - gpt_max_audio_tokens: int = 605 - gpt_max_text_tokens: int = 402 - gpt_max_prompt_tokens: int = 70 - gpt_layers: int = 30 - gpt_n_model_channels: int = 1024 - gpt_n_heads: int = 16 - gpt_number_text_tokens: int = None - gpt_start_text_token: int = None - gpt_stop_text_token: int = None - gpt_num_audio_tokens: int = 8194 - gpt_start_audio_token: int = 8192 - gpt_stop_audio_token: int = 8193 - gpt_code_stride_len: int = 1024 - gpt_use_masking_gt_prompt_approach: bool = True - gpt_use_perceiver_resampler: bool = False - - # HifiGAN Decoder params - input_sample_rate: int = 22050 - output_sample_rate: int = 24000 - output_hop_length: int = 256 - decoder_input_dim: int = 1024 - d_vector_dim: int = 512 - cond_d_vector_in_each_upsampling_layer: bool = True - - # constants - duration_const: int = 102400 - - -class Xtts(BaseTTS): - """ⓍTTS model implementation. - - ❗ Currently it only supports inference. 
- - Examples: - >>> from TTS.tts.configs.xtts_config import XttsConfig - >>> from TTS.tts.models.xtts import Xtts - >>> config = XttsConfig() - >>> model = Xtts.inif_from_config(config) - >>> model.load_checkpoint(config, checkpoint_dir="paths/to/models_dir/", eval=True) - """ - - def __init__(self, config: Coqpit): - super().__init__(config, ap=None, tokenizer=None) - self.mel_stats_path = None - self.config = config - self.gpt_checkpoint = self.args.gpt_checkpoint - self.decoder_checkpoint = self.args.decoder_checkpoint # TODO: check if this is even needed - self.models_dir = config.model_dir - self.gpt_batch_size = self.args.gpt_batch_size - - self.tokenizer = VoiceBpeTokenizer() - self.gpt = None - self.init_models() - self.register_buffer("mel_stats", torch.ones(80)) - - def init_models(self): - """Initialize the models. We do it here since we need to load the tokenizer first.""" - if self.tokenizer.tokenizer is not None: - self.args.gpt_number_text_tokens = self.tokenizer.get_number_tokens() - self.args.gpt_start_text_token = self.tokenizer.tokenizer.token_to_id("[START]") - self.args.gpt_stop_text_token = self.tokenizer.tokenizer.token_to_id("[STOP]") - - if self.args.gpt_number_text_tokens: - self.gpt = GPT( - layers=self.args.gpt_layers, - model_dim=self.args.gpt_n_model_channels, - start_text_token=self.args.gpt_start_text_token, - stop_text_token=self.args.gpt_stop_text_token, - heads=self.args.gpt_n_heads, - max_text_tokens=self.args.gpt_max_text_tokens, - max_mel_tokens=self.args.gpt_max_audio_tokens, - max_prompt_tokens=self.args.gpt_max_prompt_tokens, - number_text_tokens=self.args.gpt_number_text_tokens, - num_audio_tokens=self.args.gpt_num_audio_tokens, - start_audio_token=self.args.gpt_start_audio_token, - stop_audio_token=self.args.gpt_stop_audio_token, - use_perceiver_resampler=self.args.gpt_use_perceiver_resampler, - code_stride_len=self.args.gpt_code_stride_len, - ) - - self.hifigan_decoder = HifiDecoder( - input_sample_rate=self.args.input_sample_rate, - output_sample_rate=self.args.output_sample_rate, - output_hop_length=self.args.output_hop_length, - ar_mel_length_compression=self.args.gpt_code_stride_len, - decoder_input_dim=self.args.decoder_input_dim, - d_vector_dim=self.args.d_vector_dim, - cond_d_vector_in_each_upsampling_layer=self.args.cond_d_vector_in_each_upsampling_layer, - ) - - @property - def device(self): - return next(self.parameters()).device - - @torch.inference_mode() - def get_gpt_cond_latents(self, audio, sr, length: int = 3): - """Compute the conditioning latents for the GPT model from the given audio. - - Args: - audio (tensor): audio tensor. - sr (int): Sample rate of the audio. - length (int): Length of the audio in seconds. Defaults to 3. 
- """ - if sr != 22050: - audio = torchaudio.functional.resample(audio, sr, 22050) - audio = audio[:, : 22050 * length] - if self.args.gpt_use_perceiver_resampler: - n_fft = 2048 - hop_length = 256 - win_length = 1024 - else: - n_fft = 4096 - hop_length = 1024 - win_length = 4096 - mel = wav_to_mel_cloning( - audio, - mel_norms=self.mel_stats.cpu(), - n_fft=n_fft, - hop_length=hop_length, - win_length=win_length, - power=2, - normalized=False, - sample_rate=22050, - f_min=0, - f_max=8000, - n_mels=80, - ) - cond_latent = self.gpt.get_style_emb(mel.to(self.device)) - return cond_latent.transpose(1, 2) - - @torch.inference_mode() - def get_diffusion_cond_latents(self, audio, sr): - from math import ceil - - diffusion_conds = [] - CHUNK_SIZE = 102400 - audio_24k = torchaudio.functional.resample(audio, sr, 24000) - for chunk in range(ceil(audio_24k.shape[1] / CHUNK_SIZE)): - current_sample = audio_24k[:, chunk * CHUNK_SIZE : (chunk + 1) * CHUNK_SIZE] - current_sample = pad_or_truncate(current_sample, CHUNK_SIZE) - cond_mel = wav_to_univnet_mel( - current_sample.to(self.device), - do_normalization=False, - device=self.device, - ) - diffusion_conds.append(cond_mel) - diffusion_conds = torch.stack(diffusion_conds, dim=1) - diffusion_latent = self.diffusion_decoder.get_conditioning(diffusion_conds) - return diffusion_latent - - @torch.inference_mode() - def get_speaker_embedding(self, audio, sr): - audio_16k = torchaudio.functional.resample(audio, sr, 16000) - return ( - self.hifigan_decoder.speaker_encoder.forward(audio_16k.to(self.device), l2_norm=True) - .unsqueeze(-1) - .to(self.device) - ) - - @torch.inference_mode() - def get_conditioning_latents( - self, - audio_path, - gpt_cond_len=6, - max_ref_length=10, - librosa_trim_db=None, - sound_norm_refs=False, - load_sr=24000, - ): - # deal with multiples references - if not isinstance(audio_path, list): - audio_paths = [audio_path] - else: - audio_paths = audio_path - - speaker_embeddings = [] - audios = [] - speaker_embedding = None - for file_path in audio_paths: - # load the audio in 24khz to avoid issued with multiple sr references - audio = load_audio(file_path, load_sr) - audio = audio[:, : load_sr * max_ref_length].to(self.device) - if audio.shape[0] > 1: - audio = audio.mean(0, keepdim=True) - if sound_norm_refs: - audio = (audio / torch.abs(audio).max()) * 0.75 - if librosa_trim_db is not None: - audio = librosa.effects.trim(audio, top_db=librosa_trim_db)[0] - - speaker_embedding = self.get_speaker_embedding(audio, load_sr) - speaker_embeddings.append(speaker_embedding) - - audios.append(audio) - - # use a merge of all references for gpt cond latents - full_audio = torch.cat(audios, dim=-1) - gpt_cond_latents = self.get_gpt_cond_latents(full_audio, load_sr, length=gpt_cond_len) # [1, 1024, T] - - if speaker_embeddings: - speaker_embedding = torch.stack(speaker_embeddings) - speaker_embedding = speaker_embedding.mean(dim=0) - - return gpt_cond_latents, speaker_embedding - - def synthesize(self, text, config, speaker_wav, language, **kwargs): - """Synthesize speech with the given input text. - - Args: - text (str): Input text. - config (XttsConfig): Config with inference parameters. - speaker_wav (list): List of paths to the speaker audio files to be used for cloning. - language (str): Language ID of the speaker. - **kwargs: Inference settings. See `inference()`. 
- - Returns: - A dictionary of the output values with `wav` as output waveform, `deterministic_seed` as seed used at inference, - `text_input` as text token IDs after tokenizer, `voice_samples` as samples used for cloning, `conditioning_latents` - as latents used at inference. - - """ - return self.inference_with_config(text, config, ref_audio_path=speaker_wav, language=language, **kwargs) - - def inference_with_config(self, text, config, ref_audio_path, language, **kwargs): - """ - inference with config - """ - assert ( - language in self.config.languages - ), f" ❗ Language {language} is not supported. Supported languages are {self.config.languages}" - # Use generally found best tuning knobs for generation. - settings = { - "temperature": config.temperature, - "length_penalty": config.length_penalty, - "repetition_penalty": config.repetition_penalty, - "top_k": config.top_k, - "top_p": config.top_p, - "gpt_cond_len": config.gpt_cond_len, - "max_ref_len": config.max_ref_len, - "sound_norm_refs": config.sound_norm_refs, - } - settings.update(kwargs) # allow overriding of preset settings with kwargs - return self.full_inference(text, ref_audio_path, language, **settings) - - @torch.inference_mode() - def full_inference( - self, - text, - ref_audio_path, - language, - # GPT inference - temperature=0.65, - length_penalty=1, - repetition_penalty=2.0, - top_k=50, - top_p=0.85, - do_sample=True, - # Cloning - gpt_cond_len=6, - max_ref_len=10, - sound_norm_refs=False, - **hf_generate_kwargs, - ): - """ - This function produces an audio clip of the given text being spoken with the given reference voice. - - Args: - text: (str) Text to be spoken. - - ref_audio_path: (str) Path to a reference audio file to be used for cloning. This audio file should be >3 - seconds long. - - language: (str) Language of the voice to be generated. - - temperature: (float) The softmax temperature of the autoregressive model. Defaults to 0.65. - - length_penalty: (float) A length penalty applied to the autoregressive decoder. Higher settings causes the - model to produce more terse outputs. Defaults to 1.0. - - repetition_penalty: (float) A penalty that prevents the autoregressive decoder from repeating itself during - decoding. Can be used to reduce the incidence of long silences or "uhhhhhhs", etc. Defaults to 2.0. - - top_k: (int) K value used in top-k sampling. [0,inf]. Lower values mean the decoder produces more "likely" - (aka boring) outputs. Defaults to 50. - - top_p: (float) P value used in nucleus sampling. (0,1]. Lower values mean the decoder produces more "likely" - (aka boring) outputs. Defaults to 0.8. - - gpt_cond_len: (int) Length of the audio used for cloning. If audio is shorter, then audio length is used - else the first `gpt_cond_len` secs is used. Defaults to 6 seconds. - - hf_generate_kwargs: (**kwargs) The huggingface Transformers generate API is used for the autoregressive - transformer. Extra keyword args fed to this function get forwarded directly to that API. Documentation - here: https://huggingface.co/docs/transformers/internal/generation_utils - - Returns: - Generated audio clip(s) as a torch tensor. Shape 1,S if k=1 else, (k,1,S) where S is the sample length. - Sample rate is 24kHz. 
- """ - (gpt_cond_latent, speaker_embedding) = self.get_conditioning_latents( - audio_path=ref_audio_path, - gpt_cond_len=gpt_cond_len, - max_ref_length=max_ref_len, - sound_norm_refs=sound_norm_refs, - ) - - return self.inference( - text, - language, - gpt_cond_latent, - speaker_embedding, - temperature=temperature, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - top_k=top_k, - top_p=top_p, - do_sample=do_sample, - **hf_generate_kwargs, - ) - - @torch.inference_mode() - def inference( - self, - text, - language, - gpt_cond_latent, - speaker_embedding, - # GPT inference - temperature=0.65, - length_penalty=1, - repetition_penalty=2.0, - top_k=50, - top_p=0.85, - do_sample=True, - num_beams=1, - **hf_generate_kwargs, - ): - text = text.strip().lower() - text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) - - # print(" > Input text: ", text) - # print(" > Input text preprocessed: ",self.tokenizer.preprocess_text(text, language)) - # print(" > Input tokens: ", text_tokens) - # print(" > Decoded text: ", self.tokenizer.decode(text_tokens[0].cpu().numpy())) - assert ( - text_tokens.shape[-1] < self.args.gpt_max_text_tokens - ), " ❗ XTTS can only generate text with a maximum of 400 tokens." - - with torch.no_grad(): - gpt_codes = self.gpt.generate( - cond_latents=gpt_cond_latent, - text_inputs=text_tokens, - input_tokens=None, - do_sample=do_sample, - top_p=top_p, - top_k=top_k, - temperature=temperature, - num_return_sequences=self.gpt_batch_size, - num_beams=num_beams, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - output_attentions=False, - **hf_generate_kwargs, - ) - expected_output_len = torch.tensor( - [gpt_codes.shape[-1] * self.gpt.code_stride_len], device=text_tokens.device - ) - - text_len = torch.tensor([text_tokens.shape[-1]], device=self.device) - gpt_latents = self.gpt( - text_tokens, - text_len, - gpt_codes, - expected_output_len, - cond_latents=gpt_cond_latent, - return_attentions=False, - return_latent=True, - ) - silence_token = 83 - ctokens = 0 - for k in range(gpt_codes.shape[-1]): - if gpt_codes[0, k] == silence_token: - ctokens += 1 - else: - ctokens = 0 - if ctokens > 8: - gpt_latents = gpt_latents[:, :k] - break - - wav = self.hifigan_decoder(gpt_latents, g=speaker_embedding) - - return { - "wav": wav.cpu().numpy().squeeze(), - "gpt_latents": gpt_latents, - "speaker_embedding": speaker_embedding, - } - - def handle_chunks(self, wav_gen, wav_gen_prev, wav_overlap, overlap_len): - """Handle chunk formatting in streaming mode""" - wav_chunk = wav_gen[:-overlap_len] - if wav_gen_prev is not None: - wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len) : -overlap_len] - if wav_overlap is not None: - # cross fade the overlap section - if overlap_len > len(wav_chunk): - # wav_chunk is smaller than overlap_len, pass on last wav_gen - if wav_gen_prev is not None: - wav_chunk = wav_gen[(wav_gen_prev.shape[0] - overlap_len):] - else: - # not expecting will hit here as problem happens on last chunk - wav_chunk = wav_gen[-overlap_len:] - return wav_chunk, wav_gen, None - else: - crossfade_wav = wav_chunk[:overlap_len] - crossfade_wav = crossfade_wav * torch.linspace(0.0, 1.0, overlap_len).to(crossfade_wav.device) - wav_chunk[:overlap_len] = wav_overlap * torch.linspace(1.0, 0.0, overlap_len).to(wav_overlap.device) - wav_chunk[:overlap_len] += crossfade_wav - - wav_overlap = wav_gen[-overlap_len:] - wav_gen_prev = wav_gen - return wav_chunk, wav_gen_prev, wav_overlap - - 
@torch.inference_mode() - def inference_stream( - self, - text, - language, - gpt_cond_latent, - speaker_embedding, - # Streaming - stream_chunk_size=20, - overlap_wav_len=1024, - # GPT inference - temperature=0.65, - length_penalty=1, - repetition_penalty=2.0, - top_k=50, - top_p=0.85, - do_sample=True, - **hf_generate_kwargs, - ): - text = text.strip().lower() - text_tokens = torch.IntTensor(self.tokenizer.encode(text, lang=language)).unsqueeze(0).to(self.device) - - fake_inputs = self.gpt.compute_embeddings( - gpt_cond_latent.to(self.device), - text_tokens, - ) - gpt_generator = self.gpt.get_generator( - fake_inputs=fake_inputs, - top_k=top_k, - top_p=top_p, - temperature=temperature, - do_sample=do_sample, - num_beams=1, - num_return_sequences=1, - length_penalty=float(length_penalty), - repetition_penalty=float(repetition_penalty), - output_attentions=False, - output_hidden_states=True, - **hf_generate_kwargs, - ) - - last_tokens = [] - all_latents = [] - wav_gen_prev = None - wav_overlap = None - is_end = False - - while not is_end: - try: - x, latent = next(gpt_generator) - last_tokens += [x] - all_latents += [latent] - except StopIteration: - is_end = True - - if is_end or (stream_chunk_size > 0 and len(last_tokens) >= stream_chunk_size): - gpt_latents = torch.cat(all_latents, dim=0)[None, :] - wav_gen = self.hifigan_decoder(gpt_latents, g=speaker_embedding.to(self.device)) - wav_chunk, wav_gen_prev, wav_overlap = self.handle_chunks( - wav_gen.squeeze(), wav_gen_prev, wav_overlap, overlap_wav_len - ) - last_tokens = [] - yield wav_chunk - - def forward(self): - raise NotImplementedError( - "XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training" - ) - - def eval_step(self): - raise NotImplementedError( - "XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training" - ) - - @staticmethod - def init_from_config(config: "XttsConfig", **kwargs): # pylint: disable=unused-argument - return Xtts(config) - - def eval(self): # pylint: disable=redefined-builtin - """Sets the model to evaluation mode. Overrides the default eval() method to also set the GPT model to eval mode.""" - self.gpt.init_gpt_for_inference() - super().eval() - - def get_compatible_checkpoint_state_dict(self, model_path): - checkpoint = load_fsspec(model_path, map_location=torch.device("cpu"))["model"] - # remove xtts gpt trainer extra keys - ignore_keys = ["torch_mel_spectrogram_style_encoder", "torch_mel_spectrogram_dvae", "dvae"] - for key in list(checkpoint.keys()): - # check if it is from the coqui Trainer if so convert it - if key.startswith("xtts."): - new_key = key.replace("xtts.", "") - checkpoint[new_key] = checkpoint[key] - del checkpoint[key] - key = new_key - - # remove unused keys - if key.split(".")[0] in ignore_keys: - del checkpoint[key] - - return checkpoint - - def load_checkpoint( - self, - config, - checkpoint_dir=None, - checkpoint_path=None, - vocab_path=None, - eval=True, - strict=True, - use_deepspeed=False, - ): - """ - Loads a checkpoint from disk and initializes the model's state and tokenizer. - - Args: - config (dict): The configuration dictionary for the model. - checkpoint_dir (str, optional): The directory where the checkpoint is stored. Defaults to None. - checkpoint_path (str, optional): The path to the checkpoint file. Defaults to None. - vocab_path (str, optional): The path to the vocabulary file. Defaults to None. 
- eval (bool, optional): Whether to set the model to evaluation mode. Defaults to True. - strict (bool, optional): Whether to strictly enforce that the keys in the checkpoint match the keys in the model. Defaults to True. - - Returns: - None - """ - - model_path = checkpoint_path or os.path.join(checkpoint_dir, "model.pth") - vocab_path = vocab_path or os.path.join(checkpoint_dir, "vocab.json") - - if os.path.exists(vocab_path): - self.tokenizer = VoiceBpeTokenizer(vocab_file=vocab_path) - - self.init_models() - - checkpoint = self.get_compatible_checkpoint_state_dict(model_path) - - # deal with v1 and v1.1. V1 has the init_gpt_for_inference keys, v1.1 do not - try: - self.load_state_dict(checkpoint, strict=strict) - except: - if eval: - self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cache) - self.load_state_dict(checkpoint, strict=strict) - - if eval: - self.hifigan_decoder.eval() - self.gpt.init_gpt_for_inference(kv_cache=self.args.kv_cache, use_deepspeed=use_deepspeed) - self.gpt.eval() - - def train_step(self): - raise NotImplementedError( - "XTTS has a dedicated trainer, please check the XTTS docs: https://tts.readthedocs.io/en/dev/models/xtts.html#training" - ) diff --git a/spaces/arxify/RVC-beta-v2-0618/infer_pack/modules/F0Predictor/DioF0Predictor.py b/spaces/arxify/RVC-beta-v2-0618/infer_pack/modules/F0Predictor/DioF0Predictor.py deleted file mode 100644 index eb60d8830714338448be009d1075e3594337db15..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/infer_pack/modules/F0Predictor/DioF0Predictor.py +++ /dev/null @@ -1,90 +0,0 @@ -from infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class DioF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def 
compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.dio( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - for index, pitch in enumerate(f0): - f0[index] = round(pitch, 1) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Util/_file_system.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Util/_file_system.py deleted file mode 100644 index 1cb0c4bf4eec154f8bb483bdd3452f5dbd7eb361..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Util/_file_system.py +++ /dev/null @@ -1,54 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2016, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -import os - - -def pycryptodome_filename(dir_comps, filename): - """Return the complete file name for the module - - dir_comps : list of string - The list of directory names in the PyCryptodome package. - The first element must be "Crypto". - - filename : string - The filename (inclusing extension) in the target directory. - """ - - if dir_comps[0] != "Crypto": - raise ValueError("Only available for modules under 'Crypto'") - - dir_comps = list(dir_comps[1:]) + [filename] - - util_lib, _ = os.path.split(os.path.abspath(__file__)) - root_lib = os.path.join(util_lib, "..") - - return os.path.join(root_lib, *dir_comps) - diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/recompiler.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/recompiler.py deleted file mode 100644 index 5d9d32d7132027562c5a29405d625899611bc977..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/cffi/recompiler.py +++ /dev/null @@ -1,1581 +0,0 @@ -import os, sys, io -from . 
import ffiplatform, model -from .error import VerificationError -from .cffi_opcode import * - -VERSION_BASE = 0x2601 -VERSION_EMBEDDED = 0x2701 -VERSION_CHAR16CHAR32 = 0x2801 - -USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or - sys.version_info >= (3, 5)) - - -class GlobalExpr: - def __init__(self, name, address, type_op, size=0, check_value=0): - self.name = name - self.address = address - self.type_op = type_op - self.size = size - self.check_value = check_value - - def as_c_expr(self): - return ' { "%s", (void *)%s, %s, (void *)%s },' % ( - self.name, self.address, self.type_op.as_c_expr(), self.size) - - def as_python_expr(self): - return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, - self.check_value) - -class FieldExpr: - def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): - self.name = name - self.field_offset = field_offset - self.field_size = field_size - self.fbitsize = fbitsize - self.field_type_op = field_type_op - - def as_c_expr(self): - spaces = " " * len(self.name) - return (' { "%s", %s,\n' % (self.name, self.field_offset) + - ' %s %s,\n' % (spaces, self.field_size) + - ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) - - def as_python_expr(self): - raise NotImplementedError - - def as_field_python_expr(self): - if self.field_type_op.op == OP_NOOP: - size_expr = '' - elif self.field_type_op.op == OP_BITFIELD: - size_expr = format_four_bytes(self.fbitsize) - else: - raise NotImplementedError - return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), - size_expr, - self.name) - -class StructUnionExpr: - def __init__(self, name, type_index, flags, size, alignment, comment, - first_field_index, c_fields): - self.name = name - self.type_index = type_index - self.flags = flags - self.size = size - self.alignment = alignment - self.comment = comment - self.first_field_index = first_field_index - self.c_fields = c_fields - - def as_c_expr(self): - return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) - + '\n %s, %s, ' % (self.size, self.alignment) - + '%d, %d ' % (self.first_field_index, len(self.c_fields)) - + ('/* %s */ ' % self.comment if self.comment else '') - + '},') - - def as_python_expr(self): - flags = eval(self.flags, G_FLAGS) - fields_expr = [c_field.as_field_python_expr() - for c_field in self.c_fields] - return "(b'%s%s%s',%s)" % ( - format_four_bytes(self.type_index), - format_four_bytes(flags), - self.name, - ','.join(fields_expr)) - -class EnumExpr: - def __init__(self, name, type_index, size, signed, allenums): - self.name = name - self.type_index = type_index - self.size = size - self.signed = signed - self.allenums = allenums - - def as_c_expr(self): - return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' - ' "%s" },' % (self.name, self.type_index, - self.size, self.signed, self.allenums)) - - def as_python_expr(self): - prim_index = { - (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, - (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, - (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, - (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64, - }[self.size, self.signed] - return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), - format_four_bytes(prim_index), - self.name, self.allenums) - -class TypenameExpr: - def __init__(self, name, type_index): - self.name = name - self.type_index = type_index - - def as_c_expr(self): - return ' { "%s", %d },' % (self.name, self.type_index) - - def as_python_expr(self): - return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) - - -# 
____________________________________________________________ - - -class Recompiler: - _num_externpy = 0 - - def __init__(self, ffi, module_name, target_is_python=False): - self.ffi = ffi - self.module_name = module_name - self.target_is_python = target_is_python - self._version = VERSION_BASE - - def needs_version(self, ver): - self._version = max(self._version, ver) - - def collect_type_table(self): - self._typesdict = {} - self._generate("collecttype") - # - all_decls = sorted(self._typesdict, key=str) - # - # prepare all FUNCTION bytecode sequences first - self.cffi_types = [] - for tp in all_decls: - if tp.is_raw_function: - assert self._typesdict[tp] is None - self._typesdict[tp] = len(self.cffi_types) - self.cffi_types.append(tp) # placeholder - for tp1 in tp.args: - assert isinstance(tp1, (model.VoidType, - model.BasePrimitiveType, - model.PointerType, - model.StructOrUnionOrEnum, - model.FunctionPtrType)) - if self._typesdict[tp1] is None: - self._typesdict[tp1] = len(self.cffi_types) - self.cffi_types.append(tp1) # placeholder - self.cffi_types.append('END') # placeholder - # - # prepare all OTHER bytecode sequences - for tp in all_decls: - if not tp.is_raw_function and self._typesdict[tp] is None: - self._typesdict[tp] = len(self.cffi_types) - self.cffi_types.append(tp) # placeholder - if tp.is_array_type and tp.length is not None: - self.cffi_types.append('LEN') # placeholder - assert None not in self._typesdict.values() - # - # collect all structs and unions and enums - self._struct_unions = {} - self._enums = {} - for tp in all_decls: - if isinstance(tp, model.StructOrUnion): - self._struct_unions[tp] = None - elif isinstance(tp, model.EnumType): - self._enums[tp] = None - for i, tp in enumerate(sorted(self._struct_unions, - key=lambda tp: tp.name)): - self._struct_unions[tp] = i - for i, tp in enumerate(sorted(self._enums, - key=lambda tp: tp.name)): - self._enums[tp] = i - # - # emit all bytecode sequences now - for tp in all_decls: - method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) - method(tp, self._typesdict[tp]) - # - # consistency check - for op in self.cffi_types: - assert isinstance(op, CffiOp) - self.cffi_types = tuple(self.cffi_types) # don't change any more - - def _enum_fields(self, tp): - # When producing C, expand all anonymous struct/union fields. - # That's necessary to have C code checking the offsets of the - # individual fields contained in them. When producing Python, - # don't do it and instead write it like it is, with the - # corresponding fields having an empty name. Empty names are - # recognized at runtime when we import the generated Python - # file. 
- expand_anonymous_struct_union = not self.target_is_python - return tp.enumfields(expand_anonymous_struct_union) - - def _do_collect_type(self, tp): - if not isinstance(tp, model.BaseTypeByIdentity): - if isinstance(tp, tuple): - for x in tp: - self._do_collect_type(x) - return - if tp not in self._typesdict: - self._typesdict[tp] = None - if isinstance(tp, model.FunctionPtrType): - self._do_collect_type(tp.as_raw_function()) - elif isinstance(tp, model.StructOrUnion): - if tp.fldtypes is not None and ( - tp not in self.ffi._parser._included_declarations): - for name1, tp1, _, _ in self._enum_fields(tp): - self._do_collect_type(self._field_type(tp, name1, tp1)) - else: - for _, x in tp._get_items(): - self._do_collect_type(x) - - def _generate(self, step_name): - lst = self.ffi._parser._declarations.items() - for name, (tp, quals) in sorted(lst): - kind, realname = name.split(' ', 1) - try: - method = getattr(self, '_generate_cpy_%s_%s' % (kind, - step_name)) - except AttributeError: - raise VerificationError( - "not implemented in recompile(): %r" % name) - try: - self._current_quals = quals - method(tp, realname) - except Exception as e: - model.attach_exception_info(e, name) - raise - - # ---------- - - ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] - - def collect_step_tables(self): - # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. - self._lsts = {} - for step_name in self.ALL_STEPS: - self._lsts[step_name] = [] - self._seen_struct_unions = set() - self._generate("ctx") - self._add_missing_struct_unions() - # - for step_name in self.ALL_STEPS: - lst = self._lsts[step_name] - if step_name != "field": - lst.sort(key=lambda entry: entry.name) - self._lsts[step_name] = tuple(lst) # don't change any more - # - # check for a possible internal inconsistency: _cffi_struct_unions - # should have been generated with exactly self._struct_unions - lst = self._lsts["struct_union"] - for tp, i in self._struct_unions.items(): - assert i < len(lst) - assert lst[i].name == tp.name - assert len(lst) == len(self._struct_unions) - # same with enums - lst = self._lsts["enum"] - for tp, i in self._enums.items(): - assert i < len(lst) - assert lst[i].name == tp.name - assert len(lst) == len(self._enums) - - # ---------- - - def _prnt(self, what=''): - self._f.write(what + '\n') - - def write_source_to_f(self, f, preamble): - if self.target_is_python: - assert preamble is None - self.write_py_source_to_f(f) - else: - assert preamble is not None - self.write_c_source_to_f(f, preamble) - - def _rel_readlines(self, filename): - g = open(os.path.join(os.path.dirname(__file__), filename), 'r') - lines = g.readlines() - g.close() - return lines - - def write_c_source_to_f(self, f, preamble): - self._f = f - prnt = self._prnt - if self.ffi._embedding is not None: - prnt('#define _CFFI_USE_EMBEDDING') - if not USE_LIMITED_API: - prnt('#define _CFFI_NO_LIMITED_API') - # - # first the '#include' (actually done by inlining the file's content) - lines = self._rel_readlines('_cffi_include.h') - i = lines.index('#include "parse_c_type.h"\n') - lines[i:i+1] = self._rel_readlines('parse_c_type.h') - prnt(''.join(lines)) - # - # if we have ffi._embedding != None, we give it here as a macro - # and include an extra file - base_module_name = self.module_name.split('.')[-1] - if self.ffi._embedding is not None: - prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) - prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') - 
self._print_string_literal_in_array(self.ffi._embedding) - prnt('0 };') - prnt('#ifdef PYPY_VERSION') - prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( - base_module_name,)) - prnt('#elif PY_MAJOR_VERSION >= 3') - prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( - base_module_name,)) - prnt('#else') - prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( - base_module_name,)) - prnt('#endif') - lines = self._rel_readlines('_embedding.h') - i = lines.index('#include "_cffi_errors.h"\n') - lines[i:i+1] = self._rel_readlines('_cffi_errors.h') - prnt(''.join(lines)) - self.needs_version(VERSION_EMBEDDED) - # - # then paste the C source given by the user, verbatim. - prnt('/************************************************************/') - prnt() - prnt(preamble) - prnt() - prnt('/************************************************************/') - prnt() - # - # the declaration of '_cffi_types' - prnt('static void *_cffi_types[] = {') - typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) - for i, op in enumerate(self.cffi_types): - comment = '' - if i in typeindex2type: - comment = ' // ' + typeindex2type[i]._get_c_name() - prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) - if not self.cffi_types: - prnt(' 0') - prnt('};') - prnt() - # - # call generate_cpy_xxx_decl(), for every xxx found from - # ffi._parser._declarations. This generates all the functions. - self._seen_constants = set() - self._generate("decl") - # - # the declaration of '_cffi_globals' and '_cffi_typenames' - nums = {} - for step_name in self.ALL_STEPS: - lst = self._lsts[step_name] - nums[step_name] = len(lst) - if nums[step_name] > 0: - prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( - step_name, step_name)) - for entry in lst: - prnt(entry.as_c_expr()) - prnt('};') - prnt() - # - # the declaration of '_cffi_includes' - if self.ffi._included_ffis: - prnt('static const char * const _cffi_includes[] = {') - for ffi_to_include in self.ffi._included_ffis: - try: - included_module_name, included_source = ( - ffi_to_include._assigned_source[:2]) - except AttributeError: - raise VerificationError( - "ffi object %r includes %r, but the latter has not " - "been prepared with set_source()" % ( - self.ffi, ffi_to_include,)) - if included_source is None: - raise VerificationError( - "not implemented yet: ffi.include() of a Python-based " - "ffi inside a C-based ffi") - prnt(' "%s",' % (included_module_name,)) - prnt(' NULL') - prnt('};') - prnt() - # - # the declaration of '_cffi_type_context' - prnt('static const struct _cffi_type_context_s _cffi_type_context = {') - prnt(' _cffi_types,') - for step_name in self.ALL_STEPS: - if nums[step_name] > 0: - prnt(' _cffi_%ss,' % step_name) - else: - prnt(' NULL, /* no %ss */' % step_name) - for step_name in self.ALL_STEPS: - if step_name != "field": - prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) - if self.ffi._included_ffis: - prnt(' _cffi_includes,') - else: - prnt(' NULL, /* no includes */') - prnt(' %d, /* num_types */' % (len(self.cffi_types),)) - flags = 0 - if self._num_externpy > 0 or self.ffi._embedding is not None: - flags |= 1 # set to mean that we use extern "Python" - prnt(' %d, /* flags */' % flags) - prnt('};') - prnt() - # - # the init function - prnt('#ifdef __GNUC__') - prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') - prnt('#endif') - prnt() - prnt('#ifdef PYPY_VERSION') - prnt('PyMODINIT_FUNC') - prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) - prnt('{') - if flags & 
1: - prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') - prnt(' _cffi_call_python_org = ' - '(void(*)(struct _cffi_externpy_s *, char *))p[1];') - prnt(' }') - prnt(' p[0] = (const void *)0x%x;' % self._version) - prnt(' p[1] = &_cffi_type_context;') - prnt('#if PY_MAJOR_VERSION >= 3') - prnt(' return NULL;') - prnt('#endif') - prnt('}') - # on Windows, distutils insists on putting init_cffi_xyz in - # 'export_symbols', so instead of fighting it, just give up and - # give it one - prnt('# ifdef _MSC_VER') - prnt(' PyMODINIT_FUNC') - prnt('# if PY_MAJOR_VERSION >= 3') - prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) - prnt('# else') - prnt(' init%s(void) { }' % (base_module_name,)) - prnt('# endif') - prnt('# endif') - prnt('#elif PY_MAJOR_VERSION >= 3') - prnt('PyMODINIT_FUNC') - prnt('PyInit_%s(void)' % (base_module_name,)) - prnt('{') - prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( - self.module_name, self._version)) - prnt('}') - prnt('#else') - prnt('PyMODINIT_FUNC') - prnt('init%s(void)' % (base_module_name,)) - prnt('{') - prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( - self.module_name, self._version)) - prnt('}') - prnt('#endif') - prnt() - prnt('#ifdef __GNUC__') - prnt('# pragma GCC visibility pop') - prnt('#endif') - self._version = None - - def _to_py(self, x): - if isinstance(x, str): - return "b'%s'" % (x,) - if isinstance(x, (list, tuple)): - rep = [self._to_py(item) for item in x] - if len(rep) == 1: - rep.append('') - return "(%s)" % (','.join(rep),) - return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. - - def write_py_source_to_f(self, f): - self._f = f - prnt = self._prnt - # - # header - prnt("# auto-generated file") - prnt("import _cffi_backend") - # - # the 'import' of the included ffis - num_includes = len(self.ffi._included_ffis or ()) - for i in range(num_includes): - ffi_to_include = self.ffi._included_ffis[i] - try: - included_module_name, included_source = ( - ffi_to_include._assigned_source[:2]) - except AttributeError: - raise VerificationError( - "ffi object %r includes %r, but the latter has not " - "been prepared with set_source()" % ( - self.ffi, ffi_to_include,)) - if included_source is not None: - raise VerificationError( - "not implemented yet: ffi.include() of a C-based " - "ffi inside a Python-based ffi") - prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) - prnt() - prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) - prnt(" _version = 0x%x," % (self._version,)) - self._version = None - # - # the '_types' keyword argument - self.cffi_types = tuple(self.cffi_types) # don't change any more - types_lst = [op.as_python_bytes() for op in self.cffi_types] - prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) - typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) - # - # the keyword arguments from ALL_STEPS - for step_name in self.ALL_STEPS: - lst = self._lsts[step_name] - if len(lst) > 0 and step_name != "field": - prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) - # - # the '_includes' keyword argument - if num_includes > 0: - prnt(' _includes = (%s,),' % ( - ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) - # - # the footer - prnt(')') - - # ---------- - - def _gettypenum(self, type): - # a KeyError here is a bug. please report it! 
:-) - return self._typesdict[type] - - def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): - extraarg = '' - if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type(): - if tp.is_integer_type() and tp.name != '_Bool': - converter = '_cffi_to_c_int' - extraarg = ', %s' % tp.name - elif isinstance(tp, model.UnknownFloatType): - # don't check with is_float_type(): it may be a 'long - # double' here, and _cffi_to_c_double would loose precision - converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) - else: - cname = tp.get_c_name('') - converter = '(%s)_cffi_to_c_%s' % (cname, - tp.name.replace(' ', '_')) - if cname in ('char16_t', 'char32_t'): - self.needs_version(VERSION_CHAR16CHAR32) - errvalue = '-1' - # - elif isinstance(tp, model.PointerType): - self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, - tovar, errcode) - return - # - elif (isinstance(tp, model.StructOrUnionOrEnum) or - isinstance(tp, model.BasePrimitiveType)): - # a struct (not a struct pointer) as a function argument; - # or, a complex (the same code works) - self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' - % (tovar, self._gettypenum(tp), fromvar)) - self._prnt(' %s;' % errcode) - return - # - elif isinstance(tp, model.FunctionPtrType): - converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') - extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) - errvalue = 'NULL' - # - else: - raise NotImplementedError(tp) - # - self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) - self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( - tovar, tp.get_c_name(''), errvalue)) - self._prnt(' %s;' % errcode) - - def _extra_local_variables(self, tp, localvars, freelines): - if isinstance(tp, model.PointerType): - localvars.add('Py_ssize_t datasize') - localvars.add('struct _cffi_freeme_s *large_args_free = NULL') - freelines.add('if (large_args_free != NULL)' - ' _cffi_free_array_arguments(large_args_free);') - - def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): - self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') - self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( - self._gettypenum(tp), fromvar, tovar)) - self._prnt(' if (datasize != 0) {') - self._prnt(' %s = ((size_t)datasize) <= 640 ? 
' - '(%s)alloca((size_t)datasize) : NULL;' % ( - tovar, tp.get_c_name(''))) - self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' - '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) - self._prnt(' datasize, &large_args_free) < 0)') - self._prnt(' %s;' % errcode) - self._prnt(' }') - - def _convert_expr_from_c(self, tp, var, context): - if isinstance(tp, model.BasePrimitiveType): - if tp.is_integer_type() and tp.name != '_Bool': - return '_cffi_from_c_int(%s, %s)' % (var, tp.name) - elif isinstance(tp, model.UnknownFloatType): - return '_cffi_from_c_double(%s)' % (var,) - elif tp.name != 'long double' and not tp.is_complex_type(): - cname = tp.name.replace(' ', '_') - if cname in ('char16_t', 'char32_t'): - self.needs_version(VERSION_CHAR16CHAR32) - return '_cffi_from_c_%s(%s)' % (cname, var) - else: - return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): - return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - elif isinstance(tp, model.ArrayType): - return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( - var, self._gettypenum(model.PointerType(tp.item))) - elif isinstance(tp, model.StructOrUnion): - if tp.fldnames is None: - raise TypeError("'%s' is used as %s, but is opaque" % ( - tp._get_c_name(), context)) - return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - elif isinstance(tp, model.EnumType): - return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( - var, self._gettypenum(tp)) - else: - raise NotImplementedError(tp) - - # ---------- - # typedefs - - def _typedef_type(self, tp, name): - return self._global_type(tp, "(*(%s *)0)" % (name,)) - - def _generate_cpy_typedef_collecttype(self, tp, name): - self._do_collect_type(self._typedef_type(tp, name)) - - def _generate_cpy_typedef_decl(self, tp, name): - pass - - def _typedef_ctx(self, tp, name): - type_index = self._typesdict[tp] - self._lsts["typename"].append(TypenameExpr(name, type_index)) - - def _generate_cpy_typedef_ctx(self, tp, name): - tp = self._typedef_type(tp, name) - self._typedef_ctx(tp, name) - if getattr(tp, "origin", None) == "unknown_type": - self._struct_ctx(tp, tp.name, approxname=None) - elif isinstance(tp, model.NamedPointerType): - self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, - named_ptr=tp) - - # ---------- - # function declarations - - def _generate_cpy_function_collecttype(self, tp, name): - self._do_collect_type(tp.as_raw_function()) - if tp.ellipsis and not self.target_is_python: - self._do_collect_type(tp) - - def _generate_cpy_function_decl(self, tp, name): - assert not self.target_is_python - assert isinstance(tp, model.FunctionPtrType) - if tp.ellipsis: - # cannot support vararg functions better than this: check for its - # exact type (including the fixed arguments), and build it as a - # constant function pointer (no CPython wrapper) - self._generate_cpy_constant_decl(tp, name) - return - prnt = self._prnt - numargs = len(tp.args) - if numargs == 0: - argname = 'noarg' - elif numargs == 1: - argname = 'arg0' - else: - argname = 'args' - # - # ------------------------------ - # the 'd' version of the function, only for addressof(lib, 'func') - arguments = [] - call_arguments = [] - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - arguments.append(type.get_c_name(' x%d' % i, context)) - call_arguments.append('x%d' % i) - repr_arguments = ', 
'.join(arguments) - repr_arguments = repr_arguments or 'void' - if tp.abi: - abi = tp.abi + ' ' - else: - abi = '' - name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) - prnt('static %s' % (tp.result.get_c_name(name_and_arguments),)) - prnt('{') - call_arguments = ', '.join(call_arguments) - result_code = 'return ' - if isinstance(tp.result, model.VoidType): - result_code = '' - prnt(' %s%s(%s);' % (result_code, name, call_arguments)) - prnt('}') - # - prnt('#ifndef PYPY_VERSION') # ------------------------------ - # - prnt('static PyObject *') - prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) - prnt('{') - # - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - arg = type.get_c_name(' x%d' % i, context) - prnt(' %s;' % arg) - # - localvars = set() - freelines = set() - for type in tp.args: - self._extra_local_variables(type, localvars, freelines) - for decl in sorted(localvars): - prnt(' %s;' % (decl,)) - # - if not isinstance(tp.result, model.VoidType): - result_code = 'result = ' - context = 'result of %s' % name - result_decl = ' %s;' % tp.result.get_c_name(' result', context) - prnt(result_decl) - prnt(' PyObject *pyresult;') - else: - result_decl = None - result_code = '' - # - if len(tp.args) > 1: - rng = range(len(tp.args)) - for i in rng: - prnt(' PyObject *arg%d;' % i) - prnt() - prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( - name, len(rng), len(rng), - ', '.join(['&arg%d' % i for i in rng]))) - prnt(' return NULL;') - prnt() - # - for i, type in enumerate(tp.args): - self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, - 'return NULL') - prnt() - # - prnt(' Py_BEGIN_ALLOW_THREADS') - prnt(' _cffi_restore_errno();') - call_arguments = ['x%d' % i for i in range(len(tp.args))] - call_arguments = ', '.join(call_arguments) - prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) - prnt(' _cffi_save_errno();') - prnt(' Py_END_ALLOW_THREADS') - prnt() - # - prnt(' (void)self; /* unused */') - if numargs == 0: - prnt(' (void)noarg; /* unused */') - if result_code: - prnt(' pyresult = %s;' % - self._convert_expr_from_c(tp.result, 'result', 'result type')) - for freeline in freelines: - prnt(' ' + freeline) - prnt(' return pyresult;') - else: - for freeline in freelines: - prnt(' ' + freeline) - prnt(' Py_INCREF(Py_None);') - prnt(' return Py_None;') - prnt('}') - # - prnt('#else') # ------------------------------ - # - # the PyPy version: need to replace struct/union arguments with - # pointers, and if the result is a struct/union, insert a first - # arg that is a pointer to the result. We also do that for - # complex args and return type. 
- def need_indirection(type): - return (isinstance(type, model.StructOrUnion) or - (isinstance(type, model.PrimitiveType) and - type.is_complex_type())) - difference = False - arguments = [] - call_arguments = [] - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - indirection = '' - if need_indirection(type): - indirection = '*' - difference = True - arg = type.get_c_name(' %sx%d' % (indirection, i), context) - arguments.append(arg) - call_arguments.append('%sx%d' % (indirection, i)) - tp_result = tp.result - if need_indirection(tp_result): - context = 'result of %s' % name - arg = tp_result.get_c_name(' *result', context) - arguments.insert(0, arg) - tp_result = model.void_type - result_decl = None - result_code = '*result = ' - difference = True - if difference: - repr_arguments = ', '.join(arguments) - repr_arguments = repr_arguments or 'void' - name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, - repr_arguments) - prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) - prnt('{') - if result_decl: - prnt(result_decl) - call_arguments = ', '.join(call_arguments) - prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) - if result_decl: - prnt(' return result;') - prnt('}') - else: - prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) - # - prnt('#endif') # ------------------------------ - prnt() - - def _generate_cpy_function_ctx(self, tp, name): - if tp.ellipsis and not self.target_is_python: - self._generate_cpy_constant_ctx(tp, name) - return - type_index = self._typesdict[tp.as_raw_function()] - numargs = len(tp.args) - if self.target_is_python: - meth_kind = OP_DLOPEN_FUNC - elif numargs == 0: - meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' - elif numargs == 1: - meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' - else: - meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' - self._lsts["global"].append( - GlobalExpr(name, '_cffi_f_%s' % name, - CffiOp(meth_kind, type_index), - size='_cffi_d_%s' % name)) - - # ---------- - # named structs or unions - - def _field_type(self, tp_struct, field_name, tp_field): - if isinstance(tp_field, model.ArrayType): - actual_length = tp_field.length - if actual_length == '...': - ptr_struct_name = tp_struct.get_c_name('*') - actual_length = '_cffi_array_len(((%s)0)->%s)' % ( - ptr_struct_name, field_name) - tp_item = self._field_type(tp_struct, '%s[0]' % field_name, - tp_field.item) - tp_field = model.ArrayType(tp_item, actual_length) - return tp_field - - def _struct_collecttype(self, tp): - self._do_collect_type(tp) - if self.target_is_python: - # also requires nested anon struct/unions in ABI mode, recursively - for fldtype in tp.anonymous_struct_fields(): - self._struct_collecttype(fldtype) - - def _struct_decl(self, tp, cname, approxname): - if tp.fldtypes is None: - return - prnt = self._prnt - checkfuncname = '_cffi_checkfld_%s' % (approxname,) - prnt('_CFFI_UNUSED_FN') - prnt('static void %s(%s *p)' % (checkfuncname, cname)) - prnt('{') - prnt(' /* only to generate compile-time warnings or errors */') - prnt(' (void)p;') - for fname, ftype, fbitsize, fqual in self._enum_fields(tp): - try: - if ftype.is_integer_type() or fbitsize >= 0: - # accept all integers, but complain on float or double - if fname != '': - prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " - "an integer */" % (fname, cname, fname)) - continue - # only accept exactly the type declared, except that '[]' - # is interpreted as a '*' and so will match any array length. - # (It would also match '*', but that's harder to detect...) 
- while (isinstance(ftype, model.ArrayType) - and (ftype.length is None or ftype.length == '...')): - ftype = ftype.item - fname = fname + '[0]' - prnt(' { %s = &p->%s; (void)tmp; }' % ( - ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), - fname)) - except VerificationError as e: - prnt(' /* %s */' % str(e)) # cannot verify it, ignore - prnt('}') - prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) - prnt() - - def _struct_ctx(self, tp, cname, approxname, named_ptr=None): - type_index = self._typesdict[tp] - reason_for_not_expanding = None - flags = [] - if isinstance(tp, model.UnionType): - flags.append("_CFFI_F_UNION") - if tp.fldtypes is None: - flags.append("_CFFI_F_OPAQUE") - reason_for_not_expanding = "opaque" - if (tp not in self.ffi._parser._included_declarations and - (named_ptr is None or - named_ptr not in self.ffi._parser._included_declarations)): - if tp.fldtypes is None: - pass # opaque - elif tp.partial or any(tp.anonymous_struct_fields()): - pass # field layout obtained silently from the C compiler - else: - flags.append("_CFFI_F_CHECK_FIELDS") - if tp.packed: - if tp.packed > 1: - raise NotImplementedError( - "%r is declared with 'pack=%r'; only 0 or 1 are " - "supported in API mode (try to use \"...;\", which " - "does not require a 'pack' declaration)" % - (tp, tp.packed)) - flags.append("_CFFI_F_PACKED") - else: - flags.append("_CFFI_F_EXTERNAL") - reason_for_not_expanding = "external" - flags = '|'.join(flags) or '0' - c_fields = [] - if reason_for_not_expanding is None: - enumfields = list(self._enum_fields(tp)) - for fldname, fldtype, fbitsize, fqual in enumfields: - fldtype = self._field_type(tp, fldname, fldtype) - self._check_not_opaque(fldtype, - "field '%s.%s'" % (tp.name, fldname)) - # cname is None for _add_missing_struct_unions() only - op = OP_NOOP - if fbitsize >= 0: - op = OP_BITFIELD - size = '%d /* bits */' % fbitsize - elif cname is None or ( - isinstance(fldtype, model.ArrayType) and - fldtype.length is None): - size = '(size_t)-1' - else: - size = 'sizeof(((%s)0)->%s)' % ( - tp.get_c_name('*') if named_ptr is None - else named_ptr.name, - fldname) - if cname is None or fbitsize >= 0: - offset = '(size_t)-1' - elif named_ptr is not None: - offset = '((char *)&((%s)0)->%s) - (char *)0' % ( - named_ptr.name, fldname) - else: - offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) - c_fields.append( - FieldExpr(fldname, offset, size, fbitsize, - CffiOp(op, self._typesdict[fldtype]))) - first_field_index = len(self._lsts["field"]) - self._lsts["field"].extend(c_fields) - # - if cname is None: # unknown name, for _add_missing_struct_unions - size = '(size_t)-2' - align = -2 - comment = "unnamed" - else: - if named_ptr is not None: - size = 'sizeof(*(%s)0)' % (named_ptr.name,) - align = '-1 /* unknown alignment */' - else: - size = 'sizeof(%s)' % (cname,) - align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) - comment = None - else: - size = '(size_t)-1' - align = -1 - first_field_index = -1 - comment = reason_for_not_expanding - self._lsts["struct_union"].append( - StructUnionExpr(tp.name, type_index, flags, size, align, comment, - first_field_index, c_fields)) - self._seen_struct_unions.add(tp) - - def _check_not_opaque(self, tp, location): - while isinstance(tp, model.ArrayType): - tp = tp.item - if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: - raise TypeError( - "%s is of an opaque type (not declared in cdef())" % location) - - def _add_missing_struct_unions(self): - # not very nice, but some 
struct declarations might be missing - # because they don't have any known C name. Check that they are - # not partial (we can't complete or verify them!) and emit them - # anonymously. - lst = list(self._struct_unions.items()) - lst.sort(key=lambda tp_order: tp_order[1]) - for tp, order in lst: - if tp not in self._seen_struct_unions: - if tp.partial: - raise NotImplementedError("internal inconsistency: %r is " - "partial but was not seen at " - "this point" % (tp,)) - if tp.name.startswith('$') and tp.name[1:].isdigit(): - approxname = tp.name[1:] - elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': - approxname = 'FILE' - self._typedef_ctx(tp, 'FILE') - else: - raise NotImplementedError("internal inconsistency: %r" % - (tp,)) - self._struct_ctx(tp, None, approxname) - - def _generate_cpy_struct_collecttype(self, tp, name): - self._struct_collecttype(tp) - _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype - - def _struct_names(self, tp): - cname = tp.get_c_name('') - if ' ' in cname: - return cname, cname.replace(' ', '_') - else: - return cname, '_' + cname - - def _generate_cpy_struct_decl(self, tp, name): - self._struct_decl(tp, *self._struct_names(tp)) - _generate_cpy_union_decl = _generate_cpy_struct_decl - - def _generate_cpy_struct_ctx(self, tp, name): - self._struct_ctx(tp, *self._struct_names(tp)) - _generate_cpy_union_ctx = _generate_cpy_struct_ctx - - # ---------- - # 'anonymous' declarations. These are produced for anonymous structs - # or unions; the 'name' is obtained by a typedef. - - def _generate_cpy_anonymous_collecttype(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_collecttype(tp, name) - else: - self._struct_collecttype(tp) - - def _generate_cpy_anonymous_decl(self, tp, name): - if isinstance(tp, model.EnumType): - self._generate_cpy_enum_decl(tp) - else: - self._struct_decl(tp, name, 'typedef_' + name) - - def _generate_cpy_anonymous_ctx(self, tp, name): - if isinstance(tp, model.EnumType): - self._enum_ctx(tp, name) - else: - self._struct_ctx(tp, name, 'typedef_' + name) - - # ---------- - # constants, declared with "static const ..." 
- - def _generate_cpy_const(self, is_int, name, tp=None, category='const', - check_value=None): - if (category, name) in self._seen_constants: - raise VerificationError( - "duplicate declaration of %s '%s'" % (category, name)) - self._seen_constants.add((category, name)) - # - prnt = self._prnt - funcname = '_cffi_%s_%s' % (category, name) - if is_int: - prnt('static int %s(unsigned long long *o)' % funcname) - prnt('{') - prnt(' int n = (%s) <= 0;' % (name,)) - prnt(' *o = (unsigned long long)((%s) | 0);' - ' /* check that %s is an integer */' % (name, name)) - if check_value is not None: - if check_value > 0: - check_value = '%dU' % (check_value,) - prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) - prnt(' n |= 2;') - prnt(' return n;') - prnt('}') - else: - assert check_value is None - prnt('static void %s(char *o)' % funcname) - prnt('{') - prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) - prnt('}') - prnt() - - def _generate_cpy_constant_collecttype(self, tp, name): - is_int = tp.is_integer_type() - if not is_int or self.target_is_python: - self._do_collect_type(tp) - - def _generate_cpy_constant_decl(self, tp, name): - is_int = tp.is_integer_type() - self._generate_cpy_const(is_int, name, tp) - - def _generate_cpy_constant_ctx(self, tp, name): - if not self.target_is_python and tp.is_integer_type(): - type_op = CffiOp(OP_CONSTANT_INT, -1) - else: - if self.target_is_python: - const_kind = OP_DLOPEN_CONST - else: - const_kind = OP_CONSTANT - type_index = self._typesdict[tp] - type_op = CffiOp(const_kind, type_index) - self._lsts["global"].append( - GlobalExpr(name, '_cffi_const_%s' % name, type_op)) - - # ---------- - # enums - - def _generate_cpy_enum_collecttype(self, tp, name): - self._do_collect_type(tp) - - def _generate_cpy_enum_decl(self, tp, name=None): - for enumerator in tp.enumerators: - self._generate_cpy_const(True, enumerator) - - def _enum_ctx(self, tp, cname): - type_index = self._typesdict[tp] - type_op = CffiOp(OP_ENUM, -1) - if self.target_is_python: - tp.check_not_partial() - for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): - self._lsts["global"].append( - GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, - check_value=enumvalue)) - # - if cname is not None and '$' not in cname and not self.target_is_python: - size = "sizeof(%s)" % cname - signed = "((%s)-1) <= 0" % cname - else: - basetp = tp.build_baseinttype(self.ffi, []) - size = self.ffi.sizeof(basetp) - signed = int(int(self.ffi.cast(basetp, -1)) < 0) - allenums = ",".join(tp.enumerators) - self._lsts["enum"].append( - EnumExpr(tp.name, type_index, size, signed, allenums)) - - def _generate_cpy_enum_ctx(self, tp, name): - self._enum_ctx(tp, tp._get_c_name()) - - # ---------- - # macros: for now only for integers - - def _generate_cpy_macro_collecttype(self, tp, name): - pass - - def _generate_cpy_macro_decl(self, tp, name): - if tp == '...': - check_value = None - else: - check_value = tp # an integer - self._generate_cpy_const(True, name, check_value=check_value) - - def _generate_cpy_macro_ctx(self, tp, name): - if tp == '...': - if self.target_is_python: - raise VerificationError( - "cannot use the syntax '...' in '#define %s ...' 
when " - "using the ABI mode" % (name,)) - check_value = None - else: - check_value = tp # an integer - type_op = CffiOp(OP_CONSTANT_INT, -1) - self._lsts["global"].append( - GlobalExpr(name, '_cffi_const_%s' % name, type_op, - check_value=check_value)) - - # ---------- - # global variables - - def _global_type(self, tp, global_name): - if isinstance(tp, model.ArrayType): - actual_length = tp.length - if actual_length == '...': - actual_length = '_cffi_array_len(%s)' % (global_name,) - tp_item = self._global_type(tp.item, '%s[0]' % global_name) - tp = model.ArrayType(tp_item, actual_length) - return tp - - def _generate_cpy_variable_collecttype(self, tp, name): - self._do_collect_type(self._global_type(tp, name)) - - def _generate_cpy_variable_decl(self, tp, name): - prnt = self._prnt - tp = self._global_type(tp, name) - if isinstance(tp, model.ArrayType) and tp.length is None: - tp = tp.item - ampersand = '' - else: - ampersand = '&' - # This code assumes that casts from "tp *" to "void *" is a - # no-op, i.e. a function that returns a "tp *" can be called - # as if it returned a "void *". This should be generally true - # on any modern machine. The only exception to that rule (on - # uncommon architectures, and as far as I can tell) might be - # if 'tp' were a function type, but that is not possible here. - # (If 'tp' is a function _pointer_ type, then casts from "fn_t - # **" to "void *" are again no-ops, as far as I can tell.) - decl = '*_cffi_var_%s(void)' % (name,) - prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) - prnt('{') - prnt(' return %s(%s);' % (ampersand, name)) - prnt('}') - prnt() - - def _generate_cpy_variable_ctx(self, tp, name): - tp = self._global_type(tp, name) - type_index = self._typesdict[tp] - if self.target_is_python: - op = OP_GLOBAL_VAR - else: - op = OP_GLOBAL_VAR_F - self._lsts["global"].append( - GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) - - # ---------- - # extern "Python" - - def _generate_cpy_extern_python_collecttype(self, tp, name): - assert isinstance(tp, model.FunctionPtrType) - self._do_collect_type(tp) - _generate_cpy_dllexport_python_collecttype = \ - _generate_cpy_extern_python_plus_c_collecttype = \ - _generate_cpy_extern_python_collecttype - - def _extern_python_decl(self, tp, name, tag_and_space): - prnt = self._prnt - if isinstance(tp.result, model.VoidType): - size_of_result = '0' - else: - context = 'result of %s' % name - size_of_result = '(int)sizeof(%s)' % ( - tp.result.get_c_name('', context),) - prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) - prnt(' { "%s.%s", %s, 0, 0 };' % ( - self.module_name, name, size_of_result)) - prnt() - # - arguments = [] - context = 'argument of %s' % name - for i, type in enumerate(tp.args): - arg = type.get_c_name(' a%d' % i, context) - arguments.append(arg) - # - repr_arguments = ', '.join(arguments) - repr_arguments = repr_arguments or 'void' - name_and_arguments = '%s(%s)' % (name, repr_arguments) - if tp.abi == "__stdcall": - name_and_arguments = '_cffi_stdcall ' + name_and_arguments - # - def may_need_128_bits(tp): - return (isinstance(tp, model.PrimitiveType) and - tp.name == 'long double') - # - size_of_a = max(len(tp.args)*8, 8) - if may_need_128_bits(tp.result): - size_of_a = max(size_of_a, 16) - if isinstance(tp.result, model.StructOrUnion): - size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( - tp.result.get_c_name(''), size_of_a, - tp.result.get_c_name(''), size_of_a) - prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) - prnt('{') - prnt(' char a[%s];' % size_of_a) - prnt(' char *p = a;') - for i, type in enumerate(tp.args): - arg = 'a%d' % i - if (isinstance(type, model.StructOrUnion) or - may_need_128_bits(type)): - arg = '&' + arg - type = model.PointerType(type) - prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) - prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) - if not isinstance(tp.result, model.VoidType): - prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) - prnt('}') - prnt() - self._num_externpy += 1 - - def _generate_cpy_extern_python_decl(self, tp, name): - self._extern_python_decl(tp, name, 'static ') - - def _generate_cpy_dllexport_python_decl(self, tp, name): - self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') - - def _generate_cpy_extern_python_plus_c_decl(self, tp, name): - self._extern_python_decl(tp, name, '') - - def _generate_cpy_extern_python_ctx(self, tp, name): - if self.target_is_python: - raise VerificationError( - "cannot use 'extern \"Python\"' in the ABI mode") - if tp.ellipsis: - raise NotImplementedError("a vararg function is extern \"Python\"") - type_index = self._typesdict[tp] - type_op = CffiOp(OP_EXTERN_PYTHON, type_index) - self._lsts["global"].append( - GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) - - _generate_cpy_dllexport_python_ctx = \ - _generate_cpy_extern_python_plus_c_ctx = \ - _generate_cpy_extern_python_ctx - - def _print_string_literal_in_array(self, s): - prnt = self._prnt - prnt('// # NB. this is not a string because of a size limit in MSVC') - if not isinstance(s, bytes): # unicode - s = s.encode('utf-8') # -> bytes - else: - s.decode('utf-8') # got bytes, check for valid utf-8 - try: - s.decode('ascii') - except UnicodeDecodeError: - s = b'# -*- encoding: utf8 -*-\n' + s - for line in s.splitlines(True): - comment = line - if type('//') is bytes: # python2 - line = map(ord, line) # make a list of integers - else: # python3 - # type(line) is bytes, which enumerates like a list of integers - comment = ascii(comment)[1:-1] - prnt(('// ' + comment).rstrip()) - printed_line = '' - for c in line: - if len(printed_line) >= 76: - prnt(printed_line) - printed_line = '' - printed_line += '%d,' % (c,) - prnt(printed_line) - - # ---------- - # emitting the opcodes for individual types - - def _emit_bytecode_VoidType(self, tp, index): - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) - - def _emit_bytecode_PrimitiveType(self, tp, index): - prim_index = PRIMITIVE_TO_INDEX[tp.name] - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) - - def _emit_bytecode_UnknownIntegerType(self, tp, index): - s = ('_cffi_prim_int(sizeof(%s), (\n' - ' ((%s)-1) | 0 /* check that %s is an integer type */\n' - ' ) <= 0)' % (tp.name, tp.name, tp.name)) - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) - - def _emit_bytecode_UnknownFloatType(self, tp, index): - s = ('_cffi_prim_float(sizeof(%s) *\n' - ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' - ' )' % (tp.name, tp.name)) - self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) - - def _emit_bytecode_RawFunctionType(self, tp, index): - self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) - index += 1 - for tp1 in tp.args: - realindex = self._typesdict[tp1] - if index != realindex: - if isinstance(tp1, model.PrimitiveType): - self._emit_bytecode_PrimitiveType(tp1, index) - 
else: - self.cffi_types[index] = CffiOp(OP_NOOP, realindex) - index += 1 - flags = int(tp.ellipsis) - if tp.abi is not None: - if tp.abi == '__stdcall': - flags |= 2 - else: - raise NotImplementedError("abi=%r" % (tp.abi,)) - self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) - - def _emit_bytecode_PointerType(self, tp, index): - self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) - - _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType - _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType - - def _emit_bytecode_FunctionPtrType(self, tp, index): - raw = tp.as_raw_function() - self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) - - def _emit_bytecode_ArrayType(self, tp, index): - item_index = self._typesdict[tp.item] - if tp.length is None: - self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) - elif tp.length == '...': - raise VerificationError( - "type %s badly placed: the '...' array length can only be " - "used on global arrays or on fields of structures" % ( - str(tp).replace('/*...*/', '...'),)) - else: - assert self.cffi_types[index + 1] == 'LEN' - self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) - self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) - - def _emit_bytecode_StructType(self, tp, index): - struct_index = self._struct_unions[tp] - self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) - _emit_bytecode_UnionType = _emit_bytecode_StructType - - def _emit_bytecode_EnumType(self, tp, index): - enum_index = self._enums[tp] - self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) - - -if sys.version_info >= (3,): - NativeIO = io.StringIO -else: - class NativeIO(io.BytesIO): - def write(self, s): - if isinstance(s, unicode): - s = s.encode('ascii') - super(NativeIO, self).write(s) - -def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): - if verbose: - print("generating %s" % (target_file,)) - recompiler = Recompiler(ffi, module_name, - target_is_python=(preamble is None)) - recompiler.collect_type_table() - recompiler.collect_step_tables() - f = NativeIO() - recompiler.write_source_to_f(f, preamble) - output = f.getvalue() - try: - with open(target_file, 'r') as f1: - if f1.read(len(output) + 1) != output: - raise IOError - if verbose: - print("(already up-to-date)") - return False # already up-to-date - except IOError: - tmp_file = '%s.~%d' % (target_file, os.getpid()) - with open(tmp_file, 'w') as f1: - f1.write(output) - try: - os.rename(tmp_file, target_file) - except OSError: - os.unlink(target_file) - os.rename(tmp_file, target_file) - return True - -def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): - assert preamble is not None - return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, - verbose) - -def make_py_source(ffi, module_name, target_py_file, verbose=False): - return _make_c_or_py_source(ffi, module_name, None, target_py_file, - verbose) - -def _modname_to_file(outputdir, modname, extension): - parts = modname.split('.') - try: - os.makedirs(os.path.join(outputdir, *parts[:-1])) - except OSError: - pass - parts[-1] += extension - return os.path.join(outputdir, *parts), parts - - -# Aaargh. Distutils is not tested at all for the purpose of compiling -# DLLs that are not extension modules. Here are some hacks to work -# around that, in the _patch_for_*() functions... 
- -def _patch_meth(patchlist, cls, name, new_meth): - old = getattr(cls, name) - patchlist.append((cls, name, old)) - setattr(cls, name, new_meth) - return old - -def _unpatch_meths(patchlist): - for cls, name, old_meth in reversed(patchlist): - setattr(cls, name, old_meth) - -def _patch_for_embedding(patchlist): - if sys.platform == 'win32': - # we must not remove the manifest when building for embedding! - from distutils.msvc9compiler import MSVCCompiler - _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', - lambda self, manifest_file: manifest_file) - - if sys.platform == 'darwin': - # we must not make a '-bundle', but a '-dynamiclib' instead - from distutils.ccompiler import CCompiler - def my_link_shared_object(self, *args, **kwds): - if '-bundle' in self.linker_so: - self.linker_so = list(self.linker_so) - i = self.linker_so.index('-bundle') - self.linker_so[i] = '-dynamiclib' - return old_link_shared_object(self, *args, **kwds) - old_link_shared_object = _patch_meth(patchlist, CCompiler, - 'link_shared_object', - my_link_shared_object) - -def _patch_for_target(patchlist, target): - from distutils.command.build_ext import build_ext - # if 'target' is different from '*', we need to patch some internal - # method to just return this 'target' value, instead of having it - # built from module_name - if target.endswith('.*'): - target = target[:-2] - if sys.platform == 'win32': - target += '.dll' - elif sys.platform == 'darwin': - target += '.dylib' - else: - target += '.so' - _patch_meth(patchlist, build_ext, 'get_ext_filename', - lambda self, ext_name: target) - - -def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, - c_file=None, source_extension='.c', extradir=None, - compiler_verbose=1, target=None, debug=None, **kwds): - if not isinstance(module_name, str): - module_name = module_name.encode('ascii') - if ffi._windows_unicode: - ffi._apply_windows_unicode(kwds) - if preamble is not None: - embedding = (ffi._embedding is not None) - if embedding: - ffi._apply_embedding_fix(kwds) - if c_file is None: - c_file, parts = _modname_to_file(tmpdir, module_name, - source_extension) - if extradir: - parts = [extradir] + parts - ext_c_file = os.path.join(*parts) - else: - ext_c_file = c_file - # - if target is None: - if embedding: - target = '%s.*' % module_name - else: - target = '*' - # - ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) - updated = make_c_source(ffi, module_name, preamble, c_file, - verbose=compiler_verbose) - if call_c_compiler: - patchlist = [] - cwd = os.getcwd() - try: - if embedding: - _patch_for_embedding(patchlist) - if target != '*': - _patch_for_target(patchlist, target) - if compiler_verbose: - if tmpdir == '.': - msg = 'the current directory is' - else: - msg = 'setting the current directory to' - print('%s %r' % (msg, os.path.abspath(tmpdir))) - os.chdir(tmpdir) - outputfilename = ffiplatform.compile('.', ext, - compiler_verbose, debug) - finally: - os.chdir(cwd) - _unpatch_meths(patchlist) - return outputfilename - else: - return ext, updated - else: - if c_file is None: - c_file, _ = _modname_to_file(tmpdir, module_name, '.py') - updated = make_py_source(ffi, module_name, c_file, - verbose=compiler_verbose) - if call_c_compiler: - return c_file - else: - return None, updated - diff --git a/spaces/avivdm1/AutoGPT/autogpt/promptgenerator.py b/spaces/avivdm1/AutoGPT/autogpt/promptgenerator.py deleted file mode 100644 index 0ad7046a0c41dab356abcd0151b65890e5544cd2..0000000000000000000000000000000000000000 --- 
a/spaces/avivdm1/AutoGPT/autogpt/promptgenerator.py +++ /dev/null @@ -1,138 +0,0 @@ -""" A module for generating custom prompt strings.""" -from __future__ import annotations - -import json -from typing import Any - - -class PromptGenerator: - """ - A class for generating custom prompt strings based on constraints, commands, - resources, and performance evaluations. - """ - - def __init__(self) -> None: - """ - Initialize the PromptGenerator object with empty lists of constraints, - commands, resources, and performance evaluations. - """ - self.constraints = [] - self.commands = [] - self.resources = [] - self.performance_evaluation = [] - self.response_format = { - "thoughts": { - "text": "thought", - "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", - "criticism": "constructive self-criticism", - "speak": "thoughts summary to say to user", - }, - "command": {"name": "command name", "args": {"arg name": "value"}}, - } - - def add_constraint(self, constraint: str) -> None: - """ - Add a constraint to the constraints list. - - Args: - constraint (str): The constraint to be added. - """ - self.constraints.append(constraint) - - def add_command(self, command_label: str, command_name: str, args=None) -> None: - """ - Add a command to the commands list with a label, name, and optional arguments. - - Args: - command_label (str): The label of the command. - command_name (str): The name of the command. - args (dict, optional): A dictionary containing argument names and their - values. Defaults to None. - """ - if args is None: - args = {} - - command_args = {arg_key: arg_value for arg_key, arg_value in args.items()} - - command = { - "label": command_label, - "name": command_name, - "args": command_args, - } - - self.commands.append(command) - - def _generate_command_string(self, command: dict[str, Any]) -> str: - """ - Generate a formatted string representation of a command. - - Args: - command (dict): A dictionary containing command information. - - Returns: - str: The formatted command string. - """ - args_string = ", ".join( - f'"{key}": "{value}"' for key, value in command["args"].items() - ) - return f'{command["label"]}: "{command["name"]}", args: {args_string}' - - def add_resource(self, resource: str) -> None: - """ - Add a resource to the resources list. - - Args: - resource (str): The resource to be added. - """ - self.resources.append(resource) - - def add_performance_evaluation(self, evaluation: str) -> None: - """ - Add a performance evaluation item to the performance_evaluation list. - - Args: - evaluation (str): The evaluation item to be added. - """ - self.performance_evaluation.append(evaluation) - - def _generate_numbered_list(self, items: list[Any], item_type="list") -> str: - """ - Generate a numbered list from given items based on the item_type. - - Args: - items (list): A list of items to be numbered. - item_type (str, optional): The type of items in the list. - Defaults to 'list'. - - Returns: - str: The formatted numbered list. - """ - if item_type == "command": - return "\n".join( - f"{i+1}. {self._generate_command_string(item)}" - for i, item in enumerate(items) - ) - else: - return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items)) - - def generate_prompt_string(self) -> str: - """ - Generate a prompt string based on the constraints, commands, resources, - and performance evaluations. - - Returns: - str: The generated prompt string. 
- """ - formatted_response_format = json.dumps(self.response_format, indent=4) - return ( - f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n" - "Commands:\n" - f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n" - f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n" - "Performance Evaluation:\n" - f"{self._generate_numbered_list(self.performance_evaluation)}\n\n" - "You should only respond in JSON format as described below \nResponse" - f" Format: \n{formatted_response_format} \nEnsure the response can be" - " parsed by Python json.loads" - ) diff --git a/spaces/awacke1/1-SimPhysics/style.css b/spaces/awacke1/1-SimPhysics/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/1-SimPhysics/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/AW-06-SL-AI-Image-Music-Video-UI-UX-URL/app.py b/spaces/awacke1/AW-06-SL-AI-Image-Music-Video-UI-UX-URL/app.py deleted file mode 100644 index 0f4298365bc4f58d285202fb9442e12805d2db95..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AW-06-SL-AI-Image-Music-Video-UI-UX-URL/app.py +++ /dev/null @@ -1,45 +0,0 @@ -import streamlit as st -import gradio as gr -import IPython -import streamlit as st -import streamlit.components.v1 as components -from IPython.display import IFrame - -src='' # URL parameter to change the iframe url -def SetIframeURL(option_selected): - if (option_selected=='Collager'): - src='https://www.artbreeder.com/' - if (option_selected=='Midjourney'): - src='https://www.midjourney.com/' - if (option_selected=='DreamStudio'): - src='https://beta.dreamstudio.ai/' - if (option_selected=='NightCafe'): - src='https://creator.nightcafe.studio/' - if (option_selected=='RunwayML'): - src='https://app.runwayml.com/' - if (option_selected=='ArtFromTextandImages'): - src='https://huggingface.co/spaces/awacke1/Art-from-Text-and-Images' - if (option_selected=='Boomy'): - src='https://boomy.com/' - - width = st.sidebar.slider("Width", 200, 1500, 800, 100) - height = st.sidebar.slider("Height", 200, 1500, 900, 100) - st.components.v1.iframe(src, width, height, scrolling=True) - -try: - options = ['Midjourney', 'RunwayML', 'Boomy'] - query_params = st.experimental_get_query_params() - query_option = query_params['option'][0] #throws an exception when visiting http://host:port - option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option)) - if option_selected: - st.experimental_set_query_params(option=option_selected) - SetIframeURL(option_selected) -except: - options = ['Midjourney', 'RunwayML', 'Boomy'] - st.experimental_set_query_params(option=options[1]) # defaults to 1 - query_params = st.experimental_get_query_params() - query_option = query_params['option'][0] - option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option)) - if option_selected: - st.experimental_set_query_params(option=option_selected) - SetIframeURL(option_selected) \ No newline at end of file diff --git 
a/spaces/awacke1/BlackjackSimulatorCardGameAI/README.md b/spaces/awacke1/BlackjackSimulatorCardGameAI/README.md deleted file mode 100644 index f08dbfcbd089cbef409af11f3697a70f9d3faf86..0000000000000000000000000000000000000000 --- a/spaces/awacke1/BlackjackSimulatorCardGameAI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 🃏♠Blackjack Card Game AI2️⃣1️⃣SL -emoji: ♠2️⃣1️⃣🃏 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Game theory - the longer you play the more strategies matter: Reference Liv Boeree on Game theory: https://youtu.be/eF-E40pxxbI?t=125 diff --git a/spaces/awacke1/HEDIS.Roster.Dash.Component.SDOH/README.md b/spaces/awacke1/HEDIS.Roster.Dash.Component.SDOH/README.md deleted file mode 100644 index 1303a0ec742445c130343903dcbb1e41b665cf55..0000000000000000000000000000000000000000 --- a/spaces/awacke1/HEDIS.Roster.Dash.Component.SDOH/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: HEDIS.Roster.Dash.Component.SDOH -emoji: 🌍 -colorFrom: blue -colorTo: red -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/Video-View-Download/README.md b/spaces/awacke1/Video-View-Download/README.md deleted file mode 100644 index 6de5fc8e89f36ab262c701fb99c0f72b0e10ac7c..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Video-View-Download/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 📺💾Video Play and Download -emoji: 📺▶️💾 -colorFrom: indigo -colorTo: pink -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/badayvedat/AudioSep/models/CLAP/training/logger.py b/spaces/badayvedat/AudioSep/models/CLAP/training/logger.py deleted file mode 100644 index ac4634970fae6aacde2b7b808355dbd50c90ce73..0000000000000000000000000000000000000000 --- a/spaces/badayvedat/AudioSep/models/CLAP/training/logger.py +++ /dev/null @@ -1,30 +0,0 @@ -import logging - - -def setup_logging(log_file, level, include_host=False): - if include_host: - import socket - - hostname = socket.gethostname() - formatter = logging.Formatter( - f"%(asctime)s | {hostname} | %(levelname)s | %(message)s", - datefmt="%Y-%m-%d,%H:%M:%S", - ) - else: - formatter = logging.Formatter( - "%(asctime)s | %(levelname)s | %(message)s", datefmt="%Y-%m-%d,%H:%M:%S" - ) - - logging.root.setLevel(level) - loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] - for logger in loggers: - logger.setLevel(level) - - stream_handler = logging.StreamHandler() - stream_handler.setFormatter(formatter) - logging.root.addHandler(stream_handler) - - if log_file: - file_handler = logging.FileHandler(filename=log_file) - file_handler.setFormatter(formatter) - logging.root.addHandler(file_handler) diff --git a/spaces/banana-projects/datasets-card-creator/build/static/js/runtime-main.73e65ee8.js b/spaces/banana-projects/datasets-card-creator/build/static/js/runtime-main.73e65ee8.js deleted file mode 100644 index 4d4d38854ea06e38fa777271f9438c4ef655a582..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/datasets-card-creator/build/static/js/runtime-main.73e65ee8.js +++ /dev/null @@ -1,2 +0,0 @@ -!function(e){function t(t){for(var n,u,i=t[0],c=t[1],l=t[2],s=0,p=[];s 0.0) {", - "gl_FragColor.rgb += (average - gl_FragColor.rgb) * (1.0 
- 1.0 / (1.001 - saturation));", - "} else {", - "gl_FragColor.rgb += (average - gl_FragColor.rgb) * (-saturation);", - "}", - - "}" - - ].join( "\n" ) - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/src/polyfills.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/polyfills.d.ts deleted file mode 100644 index bb8821c6e550a5e2ee5f332a62e9cdf37af5a762..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/src/polyfills.d.ts +++ /dev/null @@ -1,16 +0,0 @@ -// log handlers -export function warn(message?: any, ...optionalParams: any[]): void; -export function error(message?: any, ...optionalParams: any[]): void; -export function log(message?: any, ...optionalParams: any[]): void; - -// typed array parameters -export type TypedArray = - | Int8Array - | Uint8Array - | Uint8ClampedArray - | Int16Array - | Uint16Array - | Int32Array - | Uint32Array - | Float32Array - | Float64Array; diff --git a/spaces/bigjoker/stable-diffusion-webui/modules/textual_inversion/logging.py b/spaces/bigjoker/stable-diffusion-webui/modules/textual_inversion/logging.py deleted file mode 100644 index b2c01f0a4ef6666c0c2e1147dbee9d6850d277c0..0000000000000000000000000000000000000000 --- a/spaces/bigjoker/stable-diffusion-webui/modules/textual_inversion/logging.py +++ /dev/null @@ -1,24 +0,0 @@ -import datetime -import json -import os - -saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "clip_grad_mode", "clip_grad_value", "gradient_step", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file", "gradient_step", "latent_sampling_method"} -saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"} -saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"} -saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet -saved_params_previews = {"preview_prompt", "preview_negative_prompt", "preview_steps", "preview_sampler_index", "preview_cfg_scale", "preview_seed", "preview_width", "preview_height"} - - -def save_settings_to_file(log_directory, all_params): - now = datetime.datetime.now() - params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")} - - keys = saved_params_all - if all_params.get('preview_from_txt2img'): - keys = keys | saved_params_previews - - params.update({k: v for k, v in all_params.items() if k in keys}) - - filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json' - with open(os.path.join(log_directory, filename), "w") as file: - json.dump(params, file, indent=4) diff --git a/spaces/bingbing520/ChatGPT2/modules/config.py b/spaces/bingbing520/ChatGPT2/modules/config.py deleted file mode 100644 index ead88f51bbcf28b151ecc5d9fa068f12091efeaa..0000000000000000000000000000000000000000 --- a/spaces/bingbing520/ChatGPT2/modules/config.py +++ /dev/null @@ -1,173 +0,0 @@ -from collections import defaultdict -from contextlib import contextmanager -import os -import logging -import sys -import commentjson as json - -from . import shared -from . 
import presets - - -__all__ = [ - "my_api_key", - "authflag", - "auth_list", - "dockerflag", - "retrieve_proxy", - "log_level", - "advance_docs", - "update_doc_config", - "multi_api_key", - "server_name", - "server_port", - "share", -] - -# 添加一个统一的config文件,避免文件过多造成的疑惑(优先级最低) -# 同时,也可以为后续支持自定义功能提供config的帮助 -if os.path.exists("config.json"): - with open("config.json", "r", encoding='utf-8') as f: - config = json.load(f) -else: - config = {} - -lang_config = config.get("language", "auto") -language = os.environ.get("LANGUAGE", lang_config) - -if os.path.exists("api_key.txt"): - logging.info("检测到api_key.txt文件,正在进行迁移...") - with open("api_key.txt", "r") as f: - config["openai_api_key"] = f.read().strip() - os.rename("api_key.txt", "api_key(deprecated).txt") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -if os.path.exists("auth.json"): - logging.info("检测到auth.json文件,正在进行迁移...") - auth_list = [] - with open("auth.json", "r", encoding='utf-8') as f: - auth = json.load(f) - for _ in auth: - if auth[_]["username"] and auth[_]["password"]: - auth_list.append((auth[_]["username"], auth[_]["password"])) - else: - logging.error("请检查auth.json文件中的用户名和密码!") - sys.exit(1) - config["users"] = auth_list - os.rename("auth.json", "auth(deprecated).json") - with open("config.json", "w", encoding='utf-8') as f: - json.dump(config, f, indent=4) - -## 处理docker if we are running in Docker -dockerflag = config.get("dockerflag", False) -if os.environ.get("dockerrun") == "yes": - dockerflag = True - -## 处理 api-key 以及 允许的用户列表 -my_api_key = config.get("openai_api_key", "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") -my_api_key = os.environ.get("OPENAI_API_KEY", my_api_key) - -xmchat_api_key = config.get("xmchat_api_key", "") -if os.environ.get("XMCHAT_API_KEY", None) == None: - os.environ["XMCHAT_API_KEY"] = xmchat_api_key - -## 多账户机制 -multi_api_key = config.get("multi_api_key", False) # 是否开启多账户机制 -if multi_api_key: - api_key_list = config.get("api_key_list", []) - if len(api_key_list) == 0: - logging.error("多账号模式已开启,但api_key_list为空,请检查config.json") - sys.exit(1) - shared.state.set_api_key_queue(api_key_list) - -auth_list = config.get("users", []) # 实际上是使用者的列表 -authflag = len(auth_list) > 0 # 是否开启认证的状态值,改为判断auth_list长度 - -# 处理自定义的api_host,优先读环境变量的配置,如果存在则自动装配 -api_host = os.environ.get("api_host", config.get("api_host", "")) -if api_host: - shared.state.set_api_host(api_host) - -@contextmanager -def retrieve_openai_api(api_key = None): - old_api_key = os.environ.get("OPENAI_API_KEY", "") - if api_key is None: - os.environ["OPENAI_API_KEY"] = my_api_key - yield my_api_key - else: - os.environ["OPENAI_API_KEY"] = api_key - yield api_key - os.environ["OPENAI_API_KEY"] = old_api_key - -## 处理log -log_level = config.get("log_level", "INFO") -logging.basicConfig( - level=log_level, - format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s", -) - -## 处理代理: -http_proxy = config.get("http_proxy", "") -https_proxy = config.get("https_proxy", "") -http_proxy = os.environ.get("HTTP_PROXY", http_proxy) -https_proxy = os.environ.get("HTTPS_PROXY", https_proxy) - -# 重置系统变量,在不需要设置的时候不设置环境变量,以免引起全局代理报错 -os.environ["HTTP_PROXY"] = "" -os.environ["HTTPS_PROXY"] = "" - -local_embedding = config.get("local_embedding", False) # 是否使用本地embedding - -@contextmanager -def retrieve_proxy(proxy=None): - """ - 1, 如果proxy = NONE,设置环境变量,并返回最新设置的代理 - 2,如果proxy != NONE,更新当前的代理配置,但是不更新环境变量 - """ - global http_proxy, https_proxy - if proxy is not None: - http_proxy = proxy - https_proxy 
= proxy - yield http_proxy, https_proxy - else: - old_var = os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] - os.environ["HTTP_PROXY"] = http_proxy - os.environ["HTTPS_PROXY"] = https_proxy - yield http_proxy, https_proxy # return new proxy - - # return old proxy - os.environ["HTTP_PROXY"], os.environ["HTTPS_PROXY"] = old_var - - -## 处理advance docs -advance_docs = defaultdict(lambda: defaultdict(dict)) -advance_docs.update(config.get("advance_docs", {})) -def update_doc_config(two_column_pdf): - global advance_docs - advance_docs["pdf"]["two_column"] = two_column_pdf - - logging.info(f"更新后的文件参数为:{advance_docs}") - -## 处理gradio.launch参数 -server_name = config.get("server_name", None) -server_port = config.get("server_port", None) -if server_name is None: - if dockerflag: - server_name = "0.0.0.0" - else: - server_name = "127.0.0.1" -if server_port is None: - if dockerflag: - server_port = 7860 - -assert server_port is None or type(server_port) == int, "要求port设置为int类型" - -# 设置默认model -default_model = config.get("default_model", "") -try: - presets.DEFAULT_MODEL = presets.MODELS.index(default_model) -except ValueError: - pass - -share = config.get("share", False) diff --git a/spaces/bioriAsaeru/text-to-voice/AUTODESK.AUTOCAD.CIVIL3D.V2016.WIN64-ISO.md b/spaces/bioriAsaeru/text-to-voice/AUTODESK.AUTOCAD.CIVIL3D.V2016.WIN64-ISO.md deleted file mode 100644 index b4581ca948173c7963448f4621277919e36c24dc..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/AUTODESK.AUTOCAD.CIVIL3D.V2016.WIN64-ISO.md +++ /dev/null @@ -1,6 +0,0 @@ -

-AUTODESK.AUTOCAD.CIVIL3D.V2016.WIN64-ISO
-
-Download >>> https://urloso.com/2uyPIh
-
-Download Autodesk AutoCAD Civil 3D for Windows to analyze and design your civil engineering ... Microsoft Project Professional 2016 (64-Bit). 1fdad05405
-
-
-

diff --git a/spaces/bioriAsaeru/text-to-voice/Asa 5505 Activation Key Generator.md b/spaces/bioriAsaeru/text-to-voice/Asa 5505 Activation Key Generator.md deleted file mode 100644 index 58389c8dd17dab8713efdfccf35807dbcdba5226..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Asa 5505 Activation Key Generator.md +++ /dev/null @@ -1,8 +0,0 @@ - -

the license-activation-type command is used to specify the license type for a feature. for example, the license-activation-type command is used to indicate that you want to activate a feature using a license key of type l-key2-periodic. the activate-license-type command is used to permanently activate a feature, regardless of the type of license that the asa has for the feature. the following example shows the output of the show license-activation-type command:

-

Asa 5505 Activation Key Generator


Download 🗸 https://urloso.com/2uyPFC



-

the license-activation-type command is used to indicate the type of license key that you want to activate a feature. if the feature is available, the license-activation-type command permanently activates the feature. for example, the license-activation-type command is used to indicate that you want to activate a feature using a license key of type l-key2-periodic. if the feature is unavailable, the license-activation-type command prompts you for the feature's license key and the license type. the following example shows the output of the show license-activation-type command:

-

the license-activation-type command is used to indicate the type of license key that you want to activate a feature. the license-activation-type command permanently activates the feature. the license-activation-type command is not available until you apply a license; you can't use it without a license. the following example shows the output of the show license-activation-type command:

-

the license-activation-type command permanently activates a feature, regardless of the type of license that the asa has for the feature. the activate-license-type command is used to permanently activate a feature, regardless of the type of license that the asa has for the feature. the activate-license-type command prompts you for the feature's feature key and the license type:

899543212b
-
-
\ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Iso Iec 27014 Pdf Download What You Need to Know About Information Security Cybersecurity and Privacy Protection.md b/spaces/bioriAsaeru/text-to-voice/Iso Iec 27014 Pdf Download What You Need to Know About Information Security Cybersecurity and Privacy Protection.md deleted file mode 100644 index 0404b31356fc7b9c92c0aa44e5fc1fac8852a2af..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Iso Iec 27014 Pdf Download What You Need to Know About Information Security Cybersecurity and Privacy Protection.md +++ /dev/null @@ -1,6 +0,0 @@ -

-Iso Iec 27014 Pdf Download
-
-Download https://urloso.com/2uyPP1
-
- aaccfb2cb3
-
-
-

diff --git a/spaces/blaziant/ysda_nlp_ops_update/README.md b/spaces/blaziant/ysda_nlp_ops_update/README.md deleted file mode 100644 index ce66094f961fd7bd128df752bfe27654782da967..0000000000000000000000000000000000000000 --- a/spaces/blaziant/ysda_nlp_ops_update/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Ysda Nlp Ops Update -emoji: 🚀 -colorFrom: pink -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/bochen0909/speech-to-speech-translation-audio-course/app.py b/spaces/bochen0909/speech-to-speech-translation-audio-course/app.py deleted file mode 100644 index d932dea614eeabdacde84d99d4e1482a5131f63f..0000000000000000000000000000000000000000 --- a/spaces/bochen0909/speech-to-speech-translation-audio-course/app.py +++ /dev/null @@ -1,107 +0,0 @@ -import gradio as gr -import numpy as np -import torch -from datasets import load_dataset -import librosa -from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor, pipeline -from transformers import WhisperProcessor, WhisperForConditionalGeneration - - -device = "cuda:0" if torch.cuda.is_available() else "cpu" - -# load speech translation checkpoint -# asr_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device) -asr_processor = WhisperProcessor.from_pretrained("openai/whisper-base") -asr_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base").to(device) -asr_forced_decoder_ids = asr_processor.get_decoder_prompt_ids(language="dutch", task="transcribe") - - - -# load text-to-speech checkpoint and speaker embeddings -if 0: - processor = SpeechT5Processor.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl") - - model = SpeechT5ForTextToSpeech.from_pretrained("sanchit-gandhi/speecht5_tts_vox_nl").to(device) -if 1: - from transformers import VitsModel, VitsTokenizer - model = VitsModel.from_pretrained("Matthijs/mms-tts-fra") - tokenizer = VitsTokenizer.from_pretrained("Matthijs/mms-tts-fra") - -vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device) - -embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation") -speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0) - - -def translate(audio): - if 0: - outputs = asr_pipe(audio, max_new_tokens=256, generate_kwargs={"language":"dutch", "task":"transcribe"}) - return outputs["text"] - else: - - x, sr = librosa.load(audio) - input_features = asr_processor(x, sampling_rate=16000, return_tensors="pt").input_features - predicted_ids = asr_model.generate(input_features, forced_decoder_ids=asr_forced_decoder_ids) - # decode token ids to text - transcription = asr_processor.batch_decode(predicted_ids, skip_special_tokens=True) - return transcription - - - -def synthesise(text): - if 0: - inputs = processor(text=text, return_tensors="pt") - speech = model.generate_speech(inputs["input_ids"].to(device), speaker_embeddings.to(device), vocoder=vocoder) - return speech.cpu() - if 1: - inputs = tokenizer(text, return_tensors="pt") - input_ids = inputs["input_ids"] - - - with torch.no_grad(): - outputs = model(input_ids) - - speech = outputs.audio[0] - return speech.cpu() - - -def speech_to_speech_translation(audio): - translated_text = translate(audio) - print(translated_text) - synthesised_speech = synthesise(translated_text) - synthesised_speech = (synthesised_speech.numpy() * 32767).astype(np.int16) - return 16000, 
synthesised_speech - - -title = "Cascaded STST" -description = """ -Demo for cascaded speech-to-speech translation (STST), mapping from source speech in any language to target speech in Dutch. Demo uses OpenAI's [Whisper Base](https://huggingface.co/openai/whisper-base) model for speech translation, and Microsoft's -[SpeechT5 TTS](https://huggingface.co/microsoft/speecht5_tts) model for text-to-speech: - -![Cascaded STST](https://huggingface.co/datasets/huggingface-course/audio-course-images/resolve/main/s2st_cascaded.png "Diagram of cascaded speech to speech translation") -""" - -demo = gr.Blocks() - -mic_translate = gr.Interface( - fn=speech_to_speech_translation, - inputs=gr.Audio(source="microphone", type="filepath"), - outputs=gr.Audio(label="Generated Speech", type="numpy"), - title=title, - description=description, -) - -file_translate = gr.Interface( - fn=speech_to_speech_translation, - inputs=gr.Audio(source="upload", type="filepath"), - outputs=gr.Audio(label="Generated Speech", type="numpy"), - examples=[["./example.wav"]], - title=title, - description=description, -) - -with demo: - gr.TabbedInterface([mic_translate, file_translate], ["Microphone", "Audio File"]) - - -demo.launch() \ No newline at end of file diff --git a/spaces/brjathu/HMR2.0/hmr2/utils/render_openpose.py b/spaces/brjathu/HMR2.0/hmr2/utils/render_openpose.py deleted file mode 100644 index a4a26e3261a35d1a8ac334cb4358e4484c20decd..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/hmr2/utils/render_openpose.py +++ /dev/null @@ -1,149 +0,0 @@ -""" -Render OpenPose keypoints. -Code was ported to Python from the official C++ implementation https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/utilities/keypoint.cpp -""" -import cv2 -import math -import numpy as np -from typing import List, Tuple - -def get_keypoints_rectangle(keypoints: np.array, threshold: float) -> Tuple[float, float, float]: - """ - Compute rectangle enclosing keypoints above the threshold. - Args: - keypoints (np.array): Keypoint array of shape (N, 3). - threshold (float): Confidence visualization threshold. - Returns: - Tuple[float, float, float]: Rectangle width, height and area. - """ - valid_ind = keypoints[:, -1] > threshold - if valid_ind.sum() > 0: - valid_keypoints = keypoints[valid_ind][:, :-1] - max_x = valid_keypoints[:,0].max() - max_y = valid_keypoints[:,1].max() - min_x = valid_keypoints[:,0].min() - min_y = valid_keypoints[:,1].min() - width = max_x - min_x - height = max_y - min_y - area = width * height - return width, height, area - else: - return 0,0,0 - -def render_keypoints(img: np.array, - keypoints: np.array, - pairs: List, - colors: List, - thickness_circle_ratio: float, - thickness_line_ratio_wrt_circle: float, - pose_scales: List, - threshold: float = 0.1) -> np.array: - """ - Render keypoints on input image. - Args: - img (np.array): Input image of shape (H, W, 3) with pixel values in the [0,255] range. - keypoints (np.array): Keypoint array of shape (N, 3). - pairs (List): List of keypoint pairs per limb. - colors: (List): List of colors per keypoint. - thickness_circle_ratio (float): Circle thickness ratio. - thickness_line_ratio_wrt_circle (float): Line thickness ratio wrt the circle. - pose_scales (List): List of pose scales. - threshold (float): Only visualize keypoints with confidence above the threshold. - Returns: - (np.array): Image of shape (H, W, 3) with keypoints drawn on top of the original image. 
- """ - img_orig = img.copy() - width, height = img.shape[1], img.shape[2] - area = width * height - - lineType = 8 - shift = 0 - numberColors = len(colors) - thresholdRectangle = 0.1 - - person_width, person_height, person_area = get_keypoints_rectangle(keypoints, thresholdRectangle) - if person_area > 0: - ratioAreas = min(1, max(person_width / width, person_height / height)) - thicknessRatio = np.maximum(np.round(math.sqrt(area) * thickness_circle_ratio * ratioAreas), 2) - thicknessCircle = np.maximum(1, thicknessRatio if ratioAreas > 0.05 else -np.ones_like(thicknessRatio)) - thicknessLine = np.maximum(1, np.round(thicknessRatio * thickness_line_ratio_wrt_circle)) - radius = thicknessRatio / 2 - - img = np.ascontiguousarray(img.copy()) - for i, pair in enumerate(pairs): - index1, index2 = pair - if keypoints[index1, -1] > threshold and keypoints[index2, -1] > threshold: - thicknessLineScaled = int(round(min(thicknessLine[index1], thicknessLine[index2]) * pose_scales[0])) - colorIndex = index2 - color = colors[colorIndex % numberColors] - keypoint1 = keypoints[index1, :-1].astype(np.int) - keypoint2 = keypoints[index2, :-1].astype(np.int) - cv2.line(img, tuple(keypoint1.tolist()), tuple(keypoint2.tolist()), tuple(color.tolist()), thicknessLineScaled, lineType, shift) - for part in range(len(keypoints)): - faceIndex = part - if keypoints[faceIndex, -1] > threshold: - radiusScaled = int(round(radius[faceIndex] * pose_scales[0])) - thicknessCircleScaled = int(round(thicknessCircle[faceIndex] * pose_scales[0])) - colorIndex = part - color = colors[colorIndex % numberColors] - center = keypoints[faceIndex, :-1].astype(np.int) - cv2.circle(img, tuple(center.tolist()), radiusScaled, tuple(color.tolist()), thicknessCircleScaled, lineType, shift) - return img - -def render_body_keypoints(img: np.array, - body_keypoints: np.array) -> np.array: - """ - Render OpenPose body keypoints on input image. - Args: - img (np.array): Input image of shape (H, W, 3) with pixel values in the [0,255] range. - body_keypoints (np.array): Keypoint array of shape (N, 3); 3 <====> (x, y, confidence). - Returns: - (np.array): Image of shape (H, W, 3) with keypoints drawn on top of the original image. - """ - - thickness_circle_ratio = 1./75. * np.ones(body_keypoints.shape[0]) - thickness_line_ratio_wrt_circle = 0.75 - pairs = [] - pairs = [1,8,1,2,1,5,2,3,3,4,5,6,6,7,8,9,9,10,10,11,8,12,12,13,13,14,1,0,0,15,15,17,0,16,16,18,14,19,19,20,14,21,11,22,22,23,11,24] - pairs = np.array(pairs).reshape(-1,2) - colors = [255., 0., 85., - 255., 0., 0., - 255., 85., 0., - 255., 170., 0., - 255., 255., 0., - 170., 255., 0., - 85., 255., 0., - 0., 255., 0., - 255., 0., 0., - 0., 255., 85., - 0., 255., 170., - 0., 255., 255., - 0., 170., 255., - 0., 85., 255., - 0., 0., 255., - 255., 0., 170., - 170., 0., 255., - 255., 0., 255., - 85., 0., 255., - 0., 0., 255., - 0., 0., 255., - 0., 0., 255., - 0., 255., 255., - 0., 255., 255., - 0., 255., 255.] - colors = np.array(colors).reshape(-1,3) - pose_scales = [1] - return render_keypoints(img, body_keypoints, pairs, colors, thickness_circle_ratio, thickness_line_ratio_wrt_circle, pose_scales, 0.1) - -def render_openpose(img: np.array, - body_keypoints: np.array) -> np.array: - """ - Render keypoints in the OpenPose format on input image. - Args: - img (np.array): Input image of shape (H, W, 3) with pixel values in the [0,255] range. - body_keypoints (np.array): Keypoint array of shape (N, 3); 3 <====> (x, y, confidence). 
- Returns: - (np.array): Image of shape (H, W, 3) with keypoints drawn on top of the original image. - """ - img = render_body_keypoints(img, body_keypoints) - return img diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/config/compat.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/config/compat.py deleted file mode 100644 index 11a08c439bf14defd880e37a938fab8a08e68eeb..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/config/compat.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -""" -Backward compatibility of configs. - -Instructions to bump version: -+ It's not needed to bump version if new keys are added. - It's only needed when backward-incompatible changes happen - (i.e., some existing keys disappear, or the meaning of a key changes) -+ To bump version, do the following: - 1. Increment _C.VERSION in defaults.py - 2. Add a converter in this file. - - Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X, - and a function "downgrade" which in-place downgrades config from X to X-1 - - In each function, VERSION is left unchanged. - - Each converter assumes that its input has the relevant keys - (i.e., the input is not a partial config). - 3. Run the tests (test_config.py) to make sure the upgrade & downgrade - functions are consistent. -""" - -import logging -from typing import List, Optional, Tuple - -from .config import CfgNode as CN -from .defaults import _C - -__all__ = ["upgrade_config", "downgrade_config"] - - -def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN: - """ - Upgrade a config from its current version to a newer version. - - Args: - cfg (CfgNode): - to_version (int): defaults to the latest version. - """ - cfg = cfg.clone() - if to_version is None: - to_version = _C.VERSION - - assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format( - cfg.VERSION, to_version - ) - for k in range(cfg.VERSION, to_version): - converter = globals()["ConverterV" + str(k + 1)] - converter.upgrade(cfg) - cfg.VERSION = k + 1 - return cfg - - -def downgrade_config(cfg: CN, to_version: int) -> CN: - """ - Downgrade a config from its current version to an older version. - - Args: - cfg (CfgNode): - to_version (int): - - Note: - A general downgrade of arbitrary configs is not always possible due to the - different functionalities in different versions. - The purpose of downgrade is only to recover the defaults in old versions, - allowing it to load an old partial yaml config. - Therefore, the implementation only needs to fill in the default values - in the old version when a general downgrade is not possible. - """ - cfg = cfg.clone() - assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format( - cfg.VERSION, to_version - ) - for k in range(cfg.VERSION, to_version, -1): - converter = globals()["ConverterV" + str(k)] - converter.downgrade(cfg) - cfg.VERSION = k - 1 - return cfg - - -def guess_version(cfg: CN, filename: str) -> int: - """ - Guess the version of a partial config where the VERSION field is not specified. - Returns the version, or the latest if cannot make a guess. - - This makes it easier for users to migrate. 
- """ - logger = logging.getLogger(__name__) - - def _has(name: str) -> bool: - cur = cfg - for n in name.split("."): - if n not in cur: - return False - cur = cur[n] - return True - - # Most users' partial configs have "MODEL.WEIGHT", so guess on it - ret = None - if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"): - ret = 1 - - if ret is not None: - logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret)) - else: - ret = _C.VERSION - logger.warning( - "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format( - filename, ret - ) - ) - return ret - - -def _rename(cfg: CN, old: str, new: str) -> None: - old_keys = old.split(".") - new_keys = new.split(".") - - def _set(key_seq: List[str], val: str) -> None: - cur = cfg - for k in key_seq[:-1]: - if k not in cur: - cur[k] = CN() - cur = cur[k] - cur[key_seq[-1]] = val - - def _get(key_seq: List[str]) -> CN: - cur = cfg - for k in key_seq: - cur = cur[k] - return cur - - def _del(key_seq: List[str]) -> None: - cur = cfg - for k in key_seq[:-1]: - cur = cur[k] - del cur[key_seq[-1]] - if len(cur) == 0 and len(key_seq) > 1: - _del(key_seq[:-1]) - - _set(new_keys, _get(old_keys)) - _del(old_keys) - - -class _RenameConverter: - """ - A converter that handles simple rename. - """ - - RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name) - - @classmethod - def upgrade(cls, cfg: CN) -> None: - for old, new in cls.RENAME: - _rename(cfg, old, new) - - @classmethod - def downgrade(cls, cfg: CN) -> None: - for old, new in cls.RENAME[::-1]: - _rename(cfg, new, old) - - -class ConverterV1(_RenameConverter): - RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")] - - -class ConverterV2(_RenameConverter): - """ - A large bulk of rename, before public release. 
- """ - - RENAME = [ - ("MODEL.WEIGHT", "MODEL.WEIGHTS"), - ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"), - ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"), - ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"), - ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"), - ( - "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD", - "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH", - ), - ( - "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT", - "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT", - ), - ( - "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD", - "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH", - ), - ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"), - ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"), - ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"), - ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"), - ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"), - ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"), - ("TEST.AUG_ON", "TEST.AUG.ENABLED"), - ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"), - ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"), - ("TEST.AUG_FLIP", "TEST.AUG.FLIP"), - ] - - @classmethod - def upgrade(cls, cfg: CN) -> None: - super().upgrade(cfg) - - if cfg.MODEL.META_ARCHITECTURE == "RetinaNet": - _rename( - cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS" - ) - _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") - del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"] - del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"] - else: - _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS") - _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES") - del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"] - del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"] - del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"] - - @classmethod - def downgrade(cls, cfg: CN) -> None: - super().downgrade(cfg) - - _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS") - _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES") - cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS - cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES - cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/augmentation.md b/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/augmentation.md deleted file mode 100644 index 7601a082ceadf645e32468c2045dfe50c1216efc..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/docs/tutorials/augmentation.md +++ /dev/null @@ -1,186 +0,0 @@ - -# Data Augmentation - -Augmentation is an important part of training. -Detectron2's data augmentation system aims at addressing the following goals: - -1. Allow augmenting multiple data types together - (e.g., images together with their bounding boxes and masks) -2. Allow applying a sequence of statically-declared augmentation -3. Allow adding custom new data types to augment (rotated bounding boxes, video clips, etc.) -4. 
Process and manipulate the __operations__ that are applied by augmentations - -The first two features cover most of the common use cases, and is also -available in other libraries such as [albumentations](https://medium.com/pytorch/multi-target-in-albumentations-16a777e9006e). -Supporting other features adds some overhead to detectron2's augmentation API, -which we'll explain in this tutorial. - -This tutorial focuses on how to use augmentations when writing new data loaders, -and how to write new augmentations. -If you use the default data loader in detectron2, it already supports taking a user-provided list of custom augmentations, -as explained in the [Dataloader tutorial](data_loading). - -## Basic Usage - -The basic usage of feature (1) and (2) is like the following: -```python -from detectron2.data import transforms as T -# Define a sequence of augmentations: -augs = T.AugmentationList([ - T.RandomBrightness(0.9, 1.1), - T.RandomFlip(prob=0.5), - T.RandomCrop("absolute", (640, 640)) -]) # type: T.Augmentation - -# Define the augmentation input ("image" required, others optional): -input = T.AugInput(image, boxes=boxes, sem_seg=sem_seg) -# Apply the augmentation: -transform = augs(input) # type: T.Transform -image_transformed = input.image # new image -sem_seg_transformed = input.sem_seg # new semantic segmentation - -# For any extra data that needs to be augmented together, use transform, e.g.: -image2_transformed = transform.apply_image(image2) -polygons_transformed = transform.apply_polygons(polygons) -``` - -Three basic concepts are involved here. They are: -* [T.Augmentation](../modules/data_transforms.html#detectron2.data.transforms.Augmentation) defines the __"policy"__ to modify inputs. - * its `__call__(AugInput) -> Transform` method augments the inputs in-place, and returns the operation that is applied -* [T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform) - implements the actual __operations__ to transform data - * it has methods such as `apply_image`, `apply_coords` that define how to transform each data type -* [T.AugInput](../modules/data_transforms.html#detectron2.data.transforms.AugInput) - stores inputs needed by `T.Augmentation` and how they should be transformed. - This concept is needed for some advanced usage. - Using this class directly should be sufficient for all common use cases, - since extra data not in `T.AugInput` can be augmented using the returned - `transform`, as shown in the above example. - -## Write New Augmentations - -Most 2D augmentations only need to know about the input image. Such augmentation can be implemented easily like this: - -```python -class MyColorAugmentation(T.Augmentation): - def get_transform(self, image): - r = np.random.rand(2) - return T.ColorTransform(lambda x: x * r[0] + r[1] * 10) - -class MyCustomResize(T.Augmentation): - def get_transform(self, image): - old_h, old_w = image.shape[:2] - new_h, new_w = int(old_h * np.random.rand()), int(old_w * 1.5) - return T.ResizeTransform(old_h, old_w, new_h, new_w) - -augs = MyCustomResize() -transform = augs(input) -``` - -In addition to image, any attributes of the given `AugInput` can be used as long -as they are part of the function signature, e.g.: - -```python -class MyCustomCrop(T.Augmentation): - def get_transform(self, image, sem_seg): - # decide where to crop using both image and sem_seg - return T.CropTransform(...) 
- -augs = MyCustomCrop() -assert hasattr(input, "image") and hasattr(input, "sem_seg") -transform = augs(input) -``` - -New transform operation can also be added by subclassing -[T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform). - -## Advanced Usage - -We give a few examples of advanced usages that -are enabled by our system. -These options can be interesting to new research, -although changing them is often not needed -for standard use cases. - -### Custom transform strategy - -Instead of only returning the augmented data, detectron2's `Augmentation` returns the __operations__ as `T.Transform`. -This allows users to apply custom transform strategy on their data. -We use keypoints data as an example. - -Keypoints are (x, y) coordinates, but they are not so trivial to augment due to the semantic meaning they carry. -Such meaning is only known to the users, therefore users may want to augment them manually -by looking at the returned `transform`. -For example, when an image is horizontally flipped, we'd like to swap the keypoint annotations for "left eye" and "right eye". -This can be done like this (included by default in detectron2's default data loader): -```python -# augs, input are defined as in previous examples -transform = augs(input) # type: T.Transform -keypoints_xy = transform.apply_coords(keypoints_xy) # transform the coordinates - -# get a list of all transforms that were applied -transforms = T.TransformList([transform]).transforms -# check if it is flipped for odd number of times -do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms) % 2 == 1 -if do_hflip: - keypoints_xy = keypoints_xy[flip_indices_mapping] -``` - -As another example, keypoints annotations often have a "visibility" field. -A sequence of augmentations might augment a visible keypoint out of the image boundary (e.g. with cropping), -but then bring it back within the boundary afterwards (e.g. with image padding). -If users decide to label such keypoints "invisible", -then the visibility check has to happen after every transform step. -This can be achieved by: - -```python -transform = augs(input) # type: T.TransformList -assert isinstance(transform, T.TransformList) -for t in transform.transforms: - keypoints_xy = t.apply_coords(keypoints_xy) - visibility &= (keypoints_xy >= [0, 0] & keypoints_xy <= [W, H]).all(axis=1) - -# btw, detectron2's `transform_keypoint_annotations` function chooses to label such keypoints "visible": -# keypoints_xy = transform.apply_coords(keypoints_xy) -# visibility &= (keypoints_xy >= [0, 0] & keypoints_xy <= [W, H]).all(axis=1) -``` - - -### Geometrically invert the transform -If images are pre-processed by augmentations before inference, the predicted results -such as segmentation masks are localized on the augmented image. -We'd like to invert the applied augmentation with the [inverse()](../modules/data_transforms.html#detectron2.data.transforms.Transform.inverse) -API, to obtain results on the original image: -```python -transform = augs(input) -pred_mask = make_prediction(input.image) -inv_transform = transform.inverse() -pred_mask_orig = inv_transform.apply_segmentation(pred_mask) -``` - -### Add new data types - -[T.Transform](../modules/data_transforms.html#detectron2.data.transforms.Transform) -supports a few common data types to transform, including images, coordinates, masks, boxes, polygons. 
-It allows registering new data types, e.g.: -```python -@T.HFlipTransform.register_type("rotated_boxes") -def func(flip_transform: T.HFlipTransform, rotated_boxes: Any): - # do the work - return flipped_rotated_boxes - -t = HFlipTransform(width=800) -transformed_rotated_boxes = t.apply_rotated_boxes(rotated_boxes) # func will be called -``` - -### Extend T.AugInput - -An augmentation can only access attributes available in the given input. -[T.AugInput](../modules/data_transforms.html#detectron2.data.transforms.StandardAugInput) defines "image", "boxes", "sem_seg", -which are sufficient for common augmentation strategies to decide how to augment. -If not, a custom implementation is needed. - -By re-implement the "transform()" method in AugInput, it is also possible to -augment different fields in ways that are dependent on each other. -Such use case is uncommon (e.g. post-process bounding box based on augmented masks), but allowed by the system. - diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/hubconf.py b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/hubconf.py deleted file mode 100644 index df585f8cb411bce4587a3abc9415801c21b60156..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/hubconf.py +++ /dev/null @@ -1,146 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/ - -Usage: - import torch - model = torch.hub.load('ultralytics/yolov5', 'yolov5s') - model = torch.hub.load('ultralytics/yolov5:master', 'custom', 'path/to/yolov5s.onnx') # file from branch -""" - -import torch - - -def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): - """Creates or loads a YOLOv5 model - - Arguments: - name (str): model name 'yolov5s' or path 'path/to/best.pt' - pretrained (bool): load pretrained weights into the model - channels (int): number of input channels - classes (int): number of model classes - autoshape (bool): apply YOLOv5 .autoshape() wrapper to model - verbose (bool): print all information to screen - device (str, torch.device, None): device to use for model parameters - - Returns: - YOLOv5 model - """ - from pathlib import Path - - from models.common import AutoShape, DetectMultiBackend - from models.yolo import Model - from utils.downloads import attempt_download - from utils.general import LOGGER, check_requirements, intersect_dicts, logging - from utils.torch_utils import select_device - - if not verbose: - LOGGER.setLevel(logging.WARNING) - - check_requirements(exclude=('tensorboard', 'thop', 'opencv-python')) - name = Path(name) - path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path - try: - device = select_device(device) - - if pretrained and channels == 3 and classes == 80: - model = DetectMultiBackend(path, device=device) # download/load FP32 model - # model = models.experimental.attempt_load(path, map_location=device) # download/load FP32 model - else: - cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path - model = Model(cfg, channels, classes) # create model - if pretrained: - ckpt = torch.load(attempt_download(path), map_location=device) # load - csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 - csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect - model.load_state_dict(csd, strict=False) # load - if 
len(ckpt['model'].names) == classes: - model.names = ckpt['model'].names # set class names attribute - if autoshape: - model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS - return model.to(device) - - except Exception as e: - help_url = 'https://github.com/ultralytics/yolov5/issues/36' - s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' - raise Exception(s) from e - - -def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): - # YOLOv5 custom or local model - return _create(path, autoshape=autoshape, verbose=_verbose, device=device) - - -def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano model https://github.com/ultralytics/yolov5 - return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small model https://github.com/ultralytics/yolov5 - return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium model https://github.com/ultralytics/yolov5 - return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large model https://github.com/ultralytics/yolov5 - return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 - return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) - - -def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): - # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 - return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) - - -if __name__ == '__main__': - model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) - # model = custom(path='path/to/model.pt') # custom - - # Verify inference - from pathlib import Path - - import numpy as np - from PIL import Image - - from utils.general import cv2 - - imgs = [ - 'data/images/zidane.jpg', # filename - 
Path('data/images/zidane.jpg'), # Path - 'https://ultralytics.com/images/zidane.jpg', # URI - cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV - Image.open('data/images/bus.jpg'), # PIL - np.zeros((320, 640, 3))] # numpy - - results = model(imgs, size=320) # batched inference - results.print() - results.save() diff --git a/spaces/callmerk1986/AyurGenie/app.py b/spaces/callmerk1986/AyurGenie/app.py deleted file mode 100644 index 2450f6188455e40d1ed2362834bce5cab50bc075..0000000000000000000000000000000000000000 --- a/spaces/callmerk1986/AyurGenie/app.py +++ /dev/null @@ -1,19 +0,0 @@ -import gradio as gr -from chatbot import get_result, get_result_history - -def predict(message, history): - history_openai_format = [ - { - "role" : "system", - "content" : """You are an ayurvedic assisstant named AyurGenie, you should answer queries in english and give a maximum of 3 best diy and home remedy options from ayurveda texts with source details. - Format the response as bulleted list.""" - }] - for human, assistant in history: - history_openai_format.append({"role": "user", "content": human }) - history_openai_format.append({"role": "assistant", "content":assistant}) - history_openai_format.append({"role": "user", "content": message}) - return get_result_history(history_openai_format) - -gr.ChatInterface(predict, - chatbot=gr.Chatbot(label="AyurGenie - Type your symptoms or an ailment to get DIY home remedies from Ayurveda Texts "), - title="AyurGenie - Unleashing the magic of Ayurveda").launch() \ No newline at end of file diff --git a/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/models/source.py b/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/models/source.py deleted file mode 100644 index f2a006e53c0e2194036fd08ea9d6ed4d9a10d6cf..0000000000000000000000000000000000000000 --- a/spaces/candlend/vits-hoshimi/sovits/vdecoder/parallel_wavegan/models/source.py +++ /dev/null @@ -1,538 +0,0 @@ -import torch -import numpy as np -import sys -import torch.nn.functional as torch_nn_func - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. 
The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, - device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) - - # generate sine waveforms - sine_waves = self._f02sine(f0_buf) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class PulseGen(torch.nn.Module): - """ Definition of Pulse train generator - - There are many ways to implement pulse generator. - Here, PulseGen is based on SinGen. 
For a perfect - """ - def __init__(self, samp_rate, pulse_amp = 0.1, - noise_std = 0.003, voiced_threshold = 0): - super(PulseGen, self).__init__() - self.pulse_amp = pulse_amp - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.noise_std = noise_std - self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0, \ - sine_amp=self.pulse_amp, noise_std=0, \ - voiced_threshold=self.voiced_threshold, \ - flag_for_pulse=True) - - def forward(self, f0): - """ Pulse train generator - pulse_train, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output pulse_train: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - - Note: self.l_sine doesn't make sure that the initial phase of - a voiced segment is np.pi, the first pulse in a voiced segment - may not be at the first time step within a voiced segment - """ - with torch.no_grad(): - sine_wav, uv, noise = self.l_sinegen(f0) - - # sine without additive noise - pure_sine = sine_wav - noise - - # step t corresponds to a pulse if - # sine[t] > sine[t+1] & sine[t] > sine[t-1] - # & sine[t-1], sine[t+1], and sine[t] are voiced - # or - # sine[t] is voiced, sine[t-1] is unvoiced - # we use torch.roll to simulate sine[t+1] and sine[t-1] - sine_1 = torch.roll(pure_sine, shifts=1, dims=1) - uv_1 = torch.roll(uv, shifts=1, dims=1) - uv_1[:, 0, :] = 0 - sine_2 = torch.roll(pure_sine, shifts=-1, dims=1) - uv_2 = torch.roll(uv, shifts=-1, dims=1) - uv_2[:, -1, :] = 0 - - loc = (pure_sine > sine_1) * (pure_sine > sine_2) \ - * (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \ - + (uv_1 < 1) * (uv > 0) - - # pulse train without noise - pulse_train = pure_sine * loc - - # additive noise to pulse train - # note that noise from sinegen is zero in voiced regions - pulse_noise = torch.randn_like(pure_sine) * self.noise_std - - # with additive noise on pulse, and unvoiced regions - pulse_train += pulse_noise * loc + pulse_noise * (1 - uv) - return pulse_train, sine_wav, uv, pulse_noise - - -class SignalsConv1d(torch.nn.Module): - """ Filtering input signal with time invariant filter - Note: FIRFilter conducted filtering given fixed FIR weight - SignalsConv1d convolves two signals - Note: this is based on torch.nn.functional.conv1d - - """ - - def __init__(self): - super(SignalsConv1d, self).__init__() - - def forward(self, signal, system_ir): - """ output = forward(signal, system_ir) - - signal: (batchsize, length1, dim) - system_ir: (length2, dim) - - output: (batchsize, length1, dim) - """ - if signal.shape[-1] != system_ir.shape[-1]: - print("Error: SignalsConv1d expects shape:") - print("signal (batchsize, length1, dim)") - print("system_id (batchsize, length2, dim)") - print("But received signal: {:s}".format(str(signal.shape))) - print(" system_ir: {:s}".format(str(system_ir.shape))) - sys.exit(1) - padding_length = system_ir.shape[0] - 1 - groups = signal.shape[-1] - - # pad signal on the left - signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1), \ - (padding_length, 0)) - # prepare system impulse response as (dim, 1, length2) - # also flip the impulse response - ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \ - dims=[2]) - # convolute - output = torch_nn_func.conv1d(signal_pad, ir, groups=groups) - return output.permute(0, 2, 1) - - -class CyclicNoiseGen_v1(torch.nn.Module): - """ CyclicnoiseGen_v1 - Cyclic noise with a single parameter of beta. 
- Pytorch v1 implementation assumes f_t is also fixed - """ - - def __init__(self, samp_rate, - noise_std=0.003, voiced_threshold=0): - super(CyclicNoiseGen_v1, self).__init__() - self.samp_rate = samp_rate - self.noise_std = noise_std - self.voiced_threshold = voiced_threshold - - self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0, - noise_std=noise_std, - voiced_threshold=voiced_threshold) - self.l_conv = SignalsConv1d() - - def noise_decay(self, beta, f0mean): - """ decayed_noise = noise_decay(beta, f0mean) - decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate) - - beta: (dim=1) or (batchsize=1, 1, dim=1) - f0mean (batchsize=1, 1, dim=1) - - decayed_noise (batchsize=1, length, dim=1) - """ - with torch.no_grad(): - # exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T - # truncate the noise when decayed by -40 dB - length = 4.6 * self.samp_rate / f0mean - length = length.int() - time_idx = torch.arange(0, length, device=beta.device) - time_idx = time_idx.unsqueeze(0).unsqueeze(2) - time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2]) - - noise = torch.randn(time_idx.shape, device=beta.device) - - # due to Pytorch implementation, use f0_mean as the f0 factor - decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate) - return noise * self.noise_std * decay - - def forward(self, f0s, beta): - """ Producde cyclic-noise - """ - # pulse train - pulse_train, sine_wav, uv, noise = self.l_pulse(f0s) - pure_pulse = pulse_train - noise - - # decayed_noise (length, dim=1) - if (uv < 1).all(): - # all unvoiced - cyc_noise = torch.zeros_like(sine_wav) - else: - f0mean = f0s[uv > 0].mean() - - decayed_noise = self.noise_decay(beta, f0mean)[0, :, :] - # convolute - cyc_noise = self.l_conv(pure_pulse, decayed_noise) - - # add noise in invoiced segments - cyc_noise = cyc_noise + noise * (1.0 - uv) - return cyc_noise, pulse_train, sine_wav, uv, noise - - -class SineGen(torch.nn.Module): - """ Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__(self, samp_rate, harmonic_num=0, - sine_amp=0.1, noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - self.flag_for_pulse = flag_for_pulse - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def _f02sine(self, f0_values): - """ f0_values: (batchsize, length, dim) - where dim indicates fundamental tone and overtones - """ - # convert to F0 in rad. 
The interger part n can be ignored - # because 2 * np.pi * n doesn't affect phase - rad_values = (f0_values / self.sampling_rate) % 1 - - # initial phase noise (no noise for fundamental component) - rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \ - device=f0_values.device) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - - # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad) - if not self.flag_for_pulse: - # for normal case - - # To prevent torch.cumsum numerical overflow, - # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1. - # Buffer tmp_over_one_idx indicates the time step to add -1. - # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi - tmp_over_one = torch.cumsum(rad_values, 1) % 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - - sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1) - * 2 * np.pi) - else: - # If necessary, make sure that the first time step of every - # voiced segments is sin(pi) or cos(0) - # This is used for pulse-train generation - - # identify the last time step in unvoiced segments - uv = self._f02uv(f0_values) - uv_1 = torch.roll(uv, shifts=-1, dims=1) - uv_1[:, -1, :] = 1 - u_loc = (uv < 1) * (uv_1 > 0) - - # get the instantanouse phase - tmp_cumsum = torch.cumsum(rad_values, dim=1) - # different batch needs to be processed differently - for idx in range(f0_values.shape[0]): - temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :] - temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :] - # stores the accumulation of i.phase within - # each voiced segments - tmp_cumsum[idx, :, :] = 0 - tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum - - # rad_values - tmp_cumsum: remove the accumulation of i.phase - # within the previous voiced segment. - i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1) - - # get the sines - sines = torch.cos(i_phase * 2 * np.pi) - return sines - - def forward(self, f0): - """ sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \ - device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2) - - # generate sine waveforms - sine_waves = self._f02sine(f0_buf) * self.sine_amp - - # generate uv signal - # uv = torch.ones(f0.shape) - # uv = uv * (f0 > self.voiced_threshold) - uv = self._f02uv(f0) - - # noise: for unvoiced should be similar to sine_amp - # std = self.sine_amp/3 -> max value ~ self.sine_amp - # . 
for voiced regions is self.noise_std - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - - # first: set the unvoiced part to 0 by uv - # then: additive noise - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleCycNoise_v1(torch.nn.Module): - """ SourceModuleCycNoise_v1 - SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - - noise_std: std of Gaussian noise (default: 0.003) - voiced_threshold: threshold to set U/V given F0 (default: 0) - - cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta) - F0_upsampled (batchsize, length, 1) - beta (1) - cyc (batchsize, length, 1) - noise (batchsize, length, 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, noise_std=0.003, voiced_threshod=0): - super(SourceModuleCycNoise_v1, self).__init__() - self.sampling_rate = sampling_rate - self.noise_std = noise_std - self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std, - voiced_threshod) - - def forward(self, f0_upsamped, beta): - """ - cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta) - F0_upsampled (batchsize, length, 1) - beta (1) - cyc (batchsize, length, 1) - noise (batchsize, length, 1) - uv (batchsize, length, 1) - """ - # source for harmonic branch - cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.noise_std / 3 - return cyc, noise, uv - - -class SourceModuleHnNSF(torch.nn.Module): - """ SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - - # to produce sine waveforms - self.l_sin_gen = SineGen(sampling_rate, harmonic_num, - sine_amp, add_noise_std, voiced_threshod) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x): - """ - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - """ - # source for harmonic branch - sine_wavs, uv, _ = self.l_sin_gen(x) - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - - # source for noise branch, in the same shape as uv - noise = torch.randn_like(uv) * self.sine_amp / 3 - return sine_merge, noise, uv - - -if __name__ == '__main__': - source = SourceModuleCycNoise_v1(24000) - x = torch.randn(16, 25600, 1) - - diff --git a/spaces/chansung/segformer-tf-transformers/app.py b/spaces/chansung/segformer-tf-transformers/app.py deleted file mode 100644 index 
2adac90509a278956627920169614ceaabb8accc..0000000000000000000000000000000000000000 --- a/spaces/chansung/segformer-tf-transformers/app.py +++ /dev/null @@ -1,117 +0,0 @@ -import csv -import os -import sys - -import cv2 -import gradio as gr -import matplotlib.pyplot as plt -import numpy as np -import onnxruntime as ort -from matplotlib import gridspec - -ade_palette = [] -labels_list = [] - -csv.field_size_limit(sys.maxsize) - -with open(r"labels.txt", "r") as fp: - for line in fp: - labels_list.append(line[:-1]) - -with open(r"ade_palette.txt", "r") as fp: - for line in fp: - tmp_list = list(map(int, line[:-1].strip("][").split(", "))) - ade_palette.append(tmp_list) - -colormap = np.asarray(ade_palette) - -model_filename = "segformer-b5-finetuned-ade-640-640.onnx" -sess_options = ort.SessionOptions() -sess_options.intra_op_num_threads = os.cpu_count() -sess = ort.InferenceSession( - model_filename, sess_options, providers=["CPUExecutionProvider"] -) - - -def label_to_color_image(label): - if label.ndim != 2: - raise ValueError("Expect 2-D input label") - - if np.max(label) >= len(colormap): - raise ValueError("label value too large.") - - return colormap[label] - - -def draw_plot(pred_img, seg): - fig = plt.figure(figsize=(20, 15)) - - grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1]) - - plt.subplot(grid_spec[0]) - plt.imshow(pred_img) - plt.axis("off") - - LABEL_NAMES = np.asarray(labels_list) - FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1) - FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP) - - unique_labels = np.unique(seg) - ax = plt.subplot(grid_spec[1]) - plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest") - ax.yaxis.tick_right() - plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels]) - plt.xticks([], []) - ax.tick_params(width=0.0, labelsize=25) - return fig - - -def sepia(input_img): - img = cv2.imread(input_img) - img = cv2.resize(img, (640, 640)).astype(np.float32) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img_batch = np.expand_dims(img, axis=0) - img_batch = np.transpose(img_batch, (0, 3, 1, 2)) - - logits = sess.run(None, {"pixel_values": img_batch})[0] - - logits = np.transpose(logits, (0, 2, 3, 1)) - seg = np.argmax(logits, axis=-1)[0].astype("float32") - seg = cv2.resize(seg, (640, 640)).astype("uint8") - - color_seg = np.zeros( - (seg.shape[0], seg.shape[1], 3), dtype=np.uint8 - ) # height, width, 3 - - for label, color in enumerate(colormap): - color_seg[seg == label, :] = color - - # Convert to BGR - color_seg = color_seg[..., ::-1] - - # Show image + mask - pred_img = img * 0.5 + color_seg * 0.5 - pred_img = pred_img.astype(np.uint8) - - fig = draw_plot(pred_img, seg) - return fig - - -title = "SegFormer(ADE20k) in TensorFlow" -description = """ - -This is demo TensorFlow SegFormer from 🤗 `transformers` official package. The pre-trained model was trained to segment scene specific images. We are **currently using ONNX model converted from the TensorFlow based SegFormer to improve the latency**. The average latency of an inference is **21** and **8** seconds for TensorFlow and ONNX converted models respectively (in [Colab](https://github.com/deep-diver/segformer-tf-transformers/blob/main/notebooks/TFSegFormer_ONNX.ipynb)). Check out the [repository](https://github.com/deep-diver/segformer-tf-transformers) to find out how to make inference, finetune the model with custom dataset, and further information. 
- -""" - -demo = gr.Interface( - sepia, - gr.inputs.Image(type="filepath"), - outputs=["plot"], - examples=["ADE_val_00000001.jpeg"], - allow_flagging="never", - title=title, - description=description, -) - -demo.launch() diff --git a/spaces/chendelong/citation-tool/README.md b/spaces/chendelong/citation-tool/README.md deleted file mode 100644 index 130dbd9949b07e0be6133e95e27171bd178dfb0b..0000000000000000000000000000000000000000 --- a/spaces/chendelong/citation-tool/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Citation Tool -emoji: 📚 -colorFrom: purple -colorTo: indigo -sdk: gradio -sdk_version: 3.50.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/auto/modeling_tf_auto.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/auto/modeling_tf_auto.py deleted file mode 100644 index 8d7d72711ec289496736e488aaf670a5ac956c94..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/models/auto/modeling_tf_auto.py +++ /dev/null @@ -1,655 +0,0 @@ -# coding=utf-8 -# Copyright 2018 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" Auto Model class.""" - - -import warnings -from collections import OrderedDict - -from ...utils import logging -from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update -from .configuration_auto import CONFIG_MAPPING_NAMES - - -logger = logging.get_logger(__name__) - - -TF_MODEL_MAPPING_NAMES = OrderedDict( - [ - # Base model mapping - ("albert", "TFAlbertModel"), - ("bart", "TFBartModel"), - ("bert", "TFBertModel"), - ("blenderbot", "TFBlenderbotModel"), - ("blenderbot-small", "TFBlenderbotSmallModel"), - ("blip", "TFBlipModel"), - ("camembert", "TFCamembertModel"), - ("clip", "TFCLIPModel"), - ("convbert", "TFConvBertModel"), - ("convnext", "TFConvNextModel"), - ("ctrl", "TFCTRLModel"), - ("cvt", "TFCvtModel"), - ("data2vec-vision", "TFData2VecVisionModel"), - ("deberta", "TFDebertaModel"), - ("deberta-v2", "TFDebertaV2Model"), - ("deit", "TFDeiTModel"), - ("distilbert", "TFDistilBertModel"), - ("dpr", "TFDPRQuestionEncoder"), - ("electra", "TFElectraModel"), - ("esm", "TFEsmModel"), - ("flaubert", "TFFlaubertModel"), - ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")), - ("gpt-sw3", "TFGPT2Model"), - ("gpt2", "TFGPT2Model"), - ("gptj", "TFGPTJModel"), - ("groupvit", "TFGroupViTModel"), - ("hubert", "TFHubertModel"), - ("layoutlm", "TFLayoutLMModel"), - ("layoutlmv3", "TFLayoutLMv3Model"), - ("led", "TFLEDModel"), - ("longformer", "TFLongformerModel"), - ("lxmert", "TFLxmertModel"), - ("marian", "TFMarianModel"), - ("mbart", "TFMBartModel"), - ("mobilebert", "TFMobileBertModel"), - ("mobilevit", "TFMobileViTModel"), - ("mpnet", "TFMPNetModel"), - ("mt5", "TFMT5Model"), - ("openai-gpt", "TFOpenAIGPTModel"), - ("opt", "TFOPTModel"), - ("pegasus", "TFPegasusModel"), - ("regnet", "TFRegNetModel"), - ("rembert", "TFRemBertModel"), - ("resnet", "TFResNetModel"), - ("roberta", "TFRobertaModel"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"), - ("roformer", "TFRoFormerModel"), - ("segformer", "TFSegformerModel"), - ("speech_to_text", "TFSpeech2TextModel"), - ("swin", "TFSwinModel"), - ("t5", "TFT5Model"), - ("tapas", "TFTapasModel"), - ("transfo-xl", "TFTransfoXLModel"), - ("vision-text-dual-encoder", "TFVisionTextDualEncoderModel"), - ("vit", "TFViTModel"), - ("vit_mae", "TFViTMAEModel"), - ("wav2vec2", "TFWav2Vec2Model"), - ("whisper", "TFWhisperModel"), - ("xglm", "TFXGLMModel"), - ("xlm", "TFXLMModel"), - ("xlm-roberta", "TFXLMRobertaModel"), - ("xlnet", "TFXLNetModel"), - ] -) - -TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict( - [ - # Model for pre-training mapping - ("albert", "TFAlbertForPreTraining"), - ("bart", "TFBartForConditionalGeneration"), - ("bert", "TFBertForPreTraining"), - ("camembert", "TFCamembertForMaskedLM"), - ("ctrl", "TFCTRLLMHeadModel"), - ("distilbert", "TFDistilBertForMaskedLM"), - ("electra", "TFElectraForPreTraining"), - ("flaubert", "TFFlaubertWithLMHeadModel"), - ("funnel", "TFFunnelForPreTraining"), - ("gpt-sw3", "TFGPT2LMHeadModel"), - ("gpt2", "TFGPT2LMHeadModel"), - ("layoutlm", "TFLayoutLMForMaskedLM"), - ("lxmert", "TFLxmertForPreTraining"), - ("mobilebert", "TFMobileBertForPreTraining"), - ("mpnet", "TFMPNetForMaskedLM"), - ("openai-gpt", "TFOpenAIGPTLMHeadModel"), - ("roberta", "TFRobertaForMaskedLM"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), - ("t5", "TFT5ForConditionalGeneration"), - ("tapas", "TFTapasForMaskedLM"), - ("transfo-xl", "TFTransfoXLLMHeadModel"), - ("vit_mae", "TFViTMAEForPreTraining"), - ("xlm", "TFXLMWithLMHeadModel"), - ("xlm-roberta", "TFXLMRobertaForMaskedLM"), - 
("xlnet", "TFXLNetLMHeadModel"), - ] -) - -TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict( - [ - # Model with LM heads mapping - ("albert", "TFAlbertForMaskedLM"), - ("bart", "TFBartForConditionalGeneration"), - ("bert", "TFBertForMaskedLM"), - ("camembert", "TFCamembertForMaskedLM"), - ("convbert", "TFConvBertForMaskedLM"), - ("ctrl", "TFCTRLLMHeadModel"), - ("distilbert", "TFDistilBertForMaskedLM"), - ("electra", "TFElectraForMaskedLM"), - ("esm", "TFEsmForMaskedLM"), - ("flaubert", "TFFlaubertWithLMHeadModel"), - ("funnel", "TFFunnelForMaskedLM"), - ("gpt-sw3", "TFGPT2LMHeadModel"), - ("gpt2", "TFGPT2LMHeadModel"), - ("gptj", "TFGPTJForCausalLM"), - ("layoutlm", "TFLayoutLMForMaskedLM"), - ("led", "TFLEDForConditionalGeneration"), - ("longformer", "TFLongformerForMaskedLM"), - ("marian", "TFMarianMTModel"), - ("mobilebert", "TFMobileBertForMaskedLM"), - ("mpnet", "TFMPNetForMaskedLM"), - ("openai-gpt", "TFOpenAIGPTLMHeadModel"), - ("rembert", "TFRemBertForMaskedLM"), - ("roberta", "TFRobertaForMaskedLM"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), - ("roformer", "TFRoFormerForMaskedLM"), - ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), - ("t5", "TFT5ForConditionalGeneration"), - ("tapas", "TFTapasForMaskedLM"), - ("transfo-xl", "TFTransfoXLLMHeadModel"), - ("whisper", "TFWhisperForConditionalGeneration"), - ("xlm", "TFXLMWithLMHeadModel"), - ("xlm-roberta", "TFXLMRobertaForMaskedLM"), - ("xlnet", "TFXLNetLMHeadModel"), - ] -) - -TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict( - [ - # Model for Causal LM mapping - ("bert", "TFBertLMHeadModel"), - ("camembert", "TFCamembertForCausalLM"), - ("ctrl", "TFCTRLLMHeadModel"), - ("gpt-sw3", "TFGPT2LMHeadModel"), - ("gpt2", "TFGPT2LMHeadModel"), - ("gptj", "TFGPTJForCausalLM"), - ("openai-gpt", "TFOpenAIGPTLMHeadModel"), - ("opt", "TFOPTForCausalLM"), - ("rembert", "TFRemBertForCausalLM"), - ("roberta", "TFRobertaForCausalLM"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForCausalLM"), - ("roformer", "TFRoFormerForCausalLM"), - ("transfo-xl", "TFTransfoXLLMHeadModel"), - ("xglm", "TFXGLMForCausalLM"), - ("xlm", "TFXLMWithLMHeadModel"), - ("xlm-roberta", "TFXLMRobertaForCausalLM"), - ("xlnet", "TFXLNetLMHeadModel"), - ] -) - -TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict( - [ - ("deit", "TFDeiTForMaskedImageModeling"), - ("swin", "TFSwinForMaskedImageModeling"), - ] -) - -TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Image-classsification - ("convnext", "TFConvNextForImageClassification"), - ("cvt", "TFCvtForImageClassification"), - ("data2vec-vision", "TFData2VecVisionForImageClassification"), - ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")), - ("mobilevit", "TFMobileViTForImageClassification"), - ("regnet", "TFRegNetForImageClassification"), - ("resnet", "TFResNetForImageClassification"), - ("segformer", "TFSegformerForImageClassification"), - ("swin", "TFSwinForImageClassification"), - ("vit", "TFViTForImageClassification"), - ] -) - - -TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Zero Shot Image Classification mapping - ("blip", "TFBlipModel"), - ("clip", "TFCLIPModel"), - ] -) - - -TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Semantic Segmentation mapping - ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"), - ("mobilevit", "TFMobileViTForSemanticSegmentation"), - ("segformer", 
"TFSegformerForSemanticSegmentation"), - ] -) - -TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( - [ - ("blip", "TFBlipForConditionalGeneration"), - ("vision-encoder-decoder", "TFVisionEncoderDecoderModel"), - ] -) - -TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict( - [ - # Model for Masked LM mapping - ("albert", "TFAlbertForMaskedLM"), - ("bert", "TFBertForMaskedLM"), - ("camembert", "TFCamembertForMaskedLM"), - ("convbert", "TFConvBertForMaskedLM"), - ("deberta", "TFDebertaForMaskedLM"), - ("deberta-v2", "TFDebertaV2ForMaskedLM"), - ("distilbert", "TFDistilBertForMaskedLM"), - ("electra", "TFElectraForMaskedLM"), - ("esm", "TFEsmForMaskedLM"), - ("flaubert", "TFFlaubertWithLMHeadModel"), - ("funnel", "TFFunnelForMaskedLM"), - ("layoutlm", "TFLayoutLMForMaskedLM"), - ("longformer", "TFLongformerForMaskedLM"), - ("mobilebert", "TFMobileBertForMaskedLM"), - ("mpnet", "TFMPNetForMaskedLM"), - ("rembert", "TFRemBertForMaskedLM"), - ("roberta", "TFRobertaForMaskedLM"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"), - ("roformer", "TFRoFormerForMaskedLM"), - ("tapas", "TFTapasForMaskedLM"), - ("xlm", "TFXLMWithLMHeadModel"), - ("xlm-roberta", "TFXLMRobertaForMaskedLM"), - ] -) - -TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( - [ - # Model for Seq2Seq Causal LM mapping - ("bart", "TFBartForConditionalGeneration"), - ("blenderbot", "TFBlenderbotForConditionalGeneration"), - ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"), - ("encoder-decoder", "TFEncoderDecoderModel"), - ("led", "TFLEDForConditionalGeneration"), - ("marian", "TFMarianMTModel"), - ("mbart", "TFMBartForConditionalGeneration"), - ("mt5", "TFMT5ForConditionalGeneration"), - ("pegasus", "TFPegasusForConditionalGeneration"), - ("t5", "TFT5ForConditionalGeneration"), - ] -) - -TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( - [ - ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), - ("whisper", "TFWhisperForConditionalGeneration"), - ] -) - -TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Sequence Classification mapping - ("albert", "TFAlbertForSequenceClassification"), - ("bart", "TFBartForSequenceClassification"), - ("bert", "TFBertForSequenceClassification"), - ("camembert", "TFCamembertForSequenceClassification"), - ("convbert", "TFConvBertForSequenceClassification"), - ("ctrl", "TFCTRLForSequenceClassification"), - ("deberta", "TFDebertaForSequenceClassification"), - ("deberta-v2", "TFDebertaV2ForSequenceClassification"), - ("distilbert", "TFDistilBertForSequenceClassification"), - ("electra", "TFElectraForSequenceClassification"), - ("esm", "TFEsmForSequenceClassification"), - ("flaubert", "TFFlaubertForSequenceClassification"), - ("funnel", "TFFunnelForSequenceClassification"), - ("gpt-sw3", "TFGPT2ForSequenceClassification"), - ("gpt2", "TFGPT2ForSequenceClassification"), - ("gptj", "TFGPTJForSequenceClassification"), - ("layoutlm", "TFLayoutLMForSequenceClassification"), - ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"), - ("longformer", "TFLongformerForSequenceClassification"), - ("mobilebert", "TFMobileBertForSequenceClassification"), - ("mpnet", "TFMPNetForSequenceClassification"), - ("openai-gpt", "TFOpenAIGPTForSequenceClassification"), - ("rembert", "TFRemBertForSequenceClassification"), - ("roberta", "TFRobertaForSequenceClassification"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForSequenceClassification"), - ("roformer", "TFRoFormerForSequenceClassification"), - ("tapas", 
"TFTapasForSequenceClassification"), - ("transfo-xl", "TFTransfoXLForSequenceClassification"), - ("xlm", "TFXLMForSequenceClassification"), - ("xlm-roberta", "TFXLMRobertaForSequenceClassification"), - ("xlnet", "TFXLNetForSequenceClassification"), - ] -) - -TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - # Model for Question Answering mapping - ("albert", "TFAlbertForQuestionAnswering"), - ("bert", "TFBertForQuestionAnswering"), - ("camembert", "TFCamembertForQuestionAnswering"), - ("convbert", "TFConvBertForQuestionAnswering"), - ("deberta", "TFDebertaForQuestionAnswering"), - ("deberta-v2", "TFDebertaV2ForQuestionAnswering"), - ("distilbert", "TFDistilBertForQuestionAnswering"), - ("electra", "TFElectraForQuestionAnswering"), - ("flaubert", "TFFlaubertForQuestionAnsweringSimple"), - ("funnel", "TFFunnelForQuestionAnswering"), - ("gptj", "TFGPTJForQuestionAnswering"), - ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"), - ("longformer", "TFLongformerForQuestionAnswering"), - ("mobilebert", "TFMobileBertForQuestionAnswering"), - ("mpnet", "TFMPNetForQuestionAnswering"), - ("rembert", "TFRemBertForQuestionAnswering"), - ("roberta", "TFRobertaForQuestionAnswering"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForQuestionAnswering"), - ("roformer", "TFRoFormerForQuestionAnswering"), - ("xlm", "TFXLMForQuestionAnsweringSimple"), - ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"), - ("xlnet", "TFXLNetForQuestionAnsweringSimple"), - ] -) - -TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - ("layoutlm", "TFLayoutLMForQuestionAnswering"), - ] -) - - -TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( - [ - # Model for Table Question Answering mapping - ("tapas", "TFTapasForQuestionAnswering"), - ] -) - -TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( - [ - # Model for Token Classification mapping - ("albert", "TFAlbertForTokenClassification"), - ("bert", "TFBertForTokenClassification"), - ("camembert", "TFCamembertForTokenClassification"), - ("convbert", "TFConvBertForTokenClassification"), - ("deberta", "TFDebertaForTokenClassification"), - ("deberta-v2", "TFDebertaV2ForTokenClassification"), - ("distilbert", "TFDistilBertForTokenClassification"), - ("electra", "TFElectraForTokenClassification"), - ("esm", "TFEsmForTokenClassification"), - ("flaubert", "TFFlaubertForTokenClassification"), - ("funnel", "TFFunnelForTokenClassification"), - ("layoutlm", "TFLayoutLMForTokenClassification"), - ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"), - ("longformer", "TFLongformerForTokenClassification"), - ("mobilebert", "TFMobileBertForTokenClassification"), - ("mpnet", "TFMPNetForTokenClassification"), - ("rembert", "TFRemBertForTokenClassification"), - ("roberta", "TFRobertaForTokenClassification"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForTokenClassification"), - ("roformer", "TFRoFormerForTokenClassification"), - ("xlm", "TFXLMForTokenClassification"), - ("xlm-roberta", "TFXLMRobertaForTokenClassification"), - ("xlnet", "TFXLNetForTokenClassification"), - ] -) - -TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict( - [ - # Model for Multiple Choice mapping - ("albert", "TFAlbertForMultipleChoice"), - ("bert", "TFBertForMultipleChoice"), - ("camembert", "TFCamembertForMultipleChoice"), - ("convbert", "TFConvBertForMultipleChoice"), - ("distilbert", "TFDistilBertForMultipleChoice"), - ("electra", "TFElectraForMultipleChoice"), - ("flaubert", "TFFlaubertForMultipleChoice"), - ("funnel", 
"TFFunnelForMultipleChoice"), - ("longformer", "TFLongformerForMultipleChoice"), - ("mobilebert", "TFMobileBertForMultipleChoice"), - ("mpnet", "TFMPNetForMultipleChoice"), - ("rembert", "TFRemBertForMultipleChoice"), - ("roberta", "TFRobertaForMultipleChoice"), - ("roberta-prelayernorm", "TFRobertaPreLayerNormForMultipleChoice"), - ("roformer", "TFRoFormerForMultipleChoice"), - ("xlm", "TFXLMForMultipleChoice"), - ("xlm-roberta", "TFXLMRobertaForMultipleChoice"), - ("xlnet", "TFXLNetForMultipleChoice"), - ] -) - -TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( - [ - ("bert", "TFBertForNextSentencePrediction"), - ("mobilebert", "TFMobileBertForNextSentencePrediction"), - ] -) - -TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES) -TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES) -TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES) -TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) -TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES -) -TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES -) -TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES -) -TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES -) -TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) -TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES) -TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES -) -TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES -) -TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES -) -TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES -) -TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES -) -TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES -) -TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES -) -TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES -) -TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping( - CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES -) - - -class TFAutoModel(_BaseAutoModelClass): - _model_mapping = TF_MODEL_MAPPING - - -TFAutoModel = auto_class_update(TFAutoModel) - - -class TFAutoModelForPreTraining(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING - - -TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining") - - -# Private on purpose, 
the public class will add the deprecation warnings. -class _TFAutoModelWithLMHead(_BaseAutoModelClass): - _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING - - -_TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling") - - -class TFAutoModelForCausalLM(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING - - -TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling") - - -class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING - - -TFAutoModelForMaskedImageModeling = auto_class_update( - TFAutoModelForMaskedImageModeling, head_doc="masked image modeling" -) - - -class TFAutoModelForImageClassification(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING - - -TFAutoModelForImageClassification = auto_class_update( - TFAutoModelForImageClassification, head_doc="image classification" -) - - -class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING - - -TFAutoModelForZeroShotImageClassification = auto_class_update( - TFAutoModelForZeroShotImageClassification, head_doc="zero-shot image classification" -) - - -class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING - - -TF_AutoModelForSemanticSegmentation = auto_class_update( - TFAutoModelForSemanticSegmentation, head_doc="semantic segmentation" -) - - -class TFAutoModelForVision2Seq(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING - - -TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc="vision-to-text modeling") - - -class TFAutoModelForMaskedLM(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING - - -TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling") - - -class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING - - -TFAutoModelForSeq2SeqLM = auto_class_update( - TFAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" -) - - -class TFAutoModelForSequenceClassification(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING - - -TFAutoModelForSequenceClassification = auto_class_update( - TFAutoModelForSequenceClassification, head_doc="sequence classification" -) - - -class TFAutoModelForQuestionAnswering(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING - - -TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering") - - -class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING - - -TFAutoModelForDocumentQuestionAnswering = auto_class_update( - TFAutoModelForDocumentQuestionAnswering, - head_doc="document question answering", - checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', -) - - -class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING - - -TFAutoModelForTableQuestionAnswering = auto_class_update( - TFAutoModelForTableQuestionAnswering, - head_doc="table question answering", - checkpoint_for_example="google/tapas-base-finetuned-wtq", -) - - -class 
TFAutoModelForTokenClassification(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING - - -TFAutoModelForTokenClassification = auto_class_update( - TFAutoModelForTokenClassification, head_doc="token classification" -) - - -class TFAutoModelForMultipleChoice(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING - - -TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice") - - -class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING - - -TFAutoModelForNextSentencePrediction = auto_class_update( - TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction" -) - - -class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass): - _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING - - -TFAutoModelForSpeechSeq2Seq = auto_class_update( - TFAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling" -) - - -class TFAutoModelWithLMHead(_TFAutoModelWithLMHead): - @classmethod - def from_config(cls, config): - warnings.warn( - "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use" - " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models" - " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.", - FutureWarning, - ) - return super().from_config(config) - - @classmethod - def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): - warnings.warn( - "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use" - " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models" - " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.", - FutureWarning, - ) - return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs) diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/models/bart/modeling_tf_bart.py b/spaces/chendl/compositional_test/transformers/src/transformers/models/bart/modeling_tf_bart.py deleted file mode 100644 index 6e29434c4df15818ec9c4acbe174f2c6417164e6..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/src/transformers/models/bart/modeling_tf_bart.py +++ /dev/null @@ -1,1607 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
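The deprecation warnings in the `TFAutoModelWithLMHead` wrapper above steer callers toward the task-specific auto classes. A minimal migration sketch, assuming publicly available checkpoints (`facebook/bart-large`, `bert-base-uncased`, `gpt2`) purely as examples:

```python
from transformers import TFAutoModelForSeq2SeqLM, TFAutoModelForMaskedLM, TFAutoModelForCausalLM

# Encoder-decoder checkpoints (e.g. BART) now resolve through TFAutoModelForSeq2SeqLM
# instead of the deprecated TFAutoModelWithLMHead.
seq2seq = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large")

# Masked-LM checkpoints (e.g. BERT) go through TFAutoModelForMaskedLM ...
masked = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased")

# ... and decoder-only checkpoints (e.g. GPT-2) through TFAutoModelForCausalLM.
causal = TFAutoModelForCausalLM.from_pretrained("gpt2")
```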
-""" TF 2.0 Bart model.""" - - -import random -from typing import Optional, Tuple, Union - -import numpy as np -import tensorflow as tf - -from ...activations_tf import get_tf_activation -from ...modeling_tf_outputs import ( - TFBaseModelOutput, - TFBaseModelOutputWithPastAndCrossAttentions, - TFSeq2SeqLMOutput, - TFSeq2SeqModelOutput, - TFSeq2SeqSequenceClassifierOutput, -) - -# Public API -from ...modeling_tf_utils import ( - DUMMY_INPUTS, - TFCausalLanguageModelingLoss, - TFModelInputType, - TFPreTrainedModel, - TFSequenceClassificationLoss, - keras_serializable, - unpack_inputs, -) -from ...tf_utils import shape_list, stable_softmax -from ...utils import ( - ContextManagers, - add_code_sample_docstrings, - add_end_docstrings, - add_start_docstrings, - add_start_docstrings_to_model_forward, - logging, - replace_return_docstrings, -) -from .configuration_bart import BartConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "facebook/bart-large" -_CONFIG_FOR_DOC = "BartConfig" - - -LARGE_NEGATIVE = -1e8 - - -def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): - pad_token_id = tf.cast(pad_token_id, input_ids.dtype) - decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill( - (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) - ) - shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) - # replace possible -100 values in labels by `pad_token_id` - shifted_input_ids = tf.where( - shifted_input_ids == -100, - tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), - shifted_input_ids, - ) - - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) - - return shifted_input_ids - - -def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): - """ - Make causal mask used for bi-directional self-attention. - """ - bsz = input_ids_shape[0] - tgt_len = input_ids_shape[1] - mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE - mask_cond = tf.range(shape_list(mask)[-1]) - - mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) - - if past_key_values_length > 0: - mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) - - return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) - - -def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. - """ - src_len = shape_list(mask)[1] - tgt_len = tgt_len if tgt_len is not None else src_len - one_cst = tf.constant(1.0) - mask = tf.cast(mask, dtype=one_cst.dtype) - expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) - - return (one_cst - expanded_mask) * LARGE_NEGATIVE - - -class TFBartLearnedPositionalEmbedding(tf.keras.layers.Embedding): - """ - This module learns positional embeddings up to a fixed maximum size. - """ - - def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): - # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 - # and adjust num_embeddings appropriately. 
Other models don't have this hack - self.offset = 2 - super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) - - def call( - self, - input_shape: Optional[tf.TensorShape] = None, - past_key_values_length: int = 0, - position_ids: Optional[tf.Tensor] = None, - ): - """Input is expected to be of size [bsz x seqlen].""" - if position_ids is None: - seq_len = input_shape[1] - position_ids = tf.range(seq_len, delta=1, name="range") - position_ids += past_key_values_length - - offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32 - return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype)) - - -class TFBartAttention(tf.keras.layers.Layer): - """Multi-headed attention from "Attention Is All You Need""" - - def __init__( - self, - embed_dim: int, - num_heads: int, - dropout: float = 0.0, - is_decoder: bool = False, - bias: bool = True, - **kwargs, - ): - super().__init__(**kwargs) - self.embed_dim = embed_dim - - self.num_heads = num_heads - self.dropout = tf.keras.layers.Dropout(dropout) - self.head_dim = embed_dim // num_heads - if (self.head_dim * num_heads) != self.embed_dim: - raise ValueError( - f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" - f" and `num_heads`: {num_heads})." - ) - self.scaling = self.head_dim**-0.5 - self.is_decoder = is_decoder - - self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") - self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") - self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") - self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") - - def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): - return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) - - def call( - self, - hidden_states: tf.Tensor, - key_value_states: Optional[tf.Tensor] = None, - past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, - attention_mask: Optional[tf.Tensor] = None, - layer_head_mask: Optional[tf.Tensor] = None, - training: Optional[bool] = False, - ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: - """Input shape: Batch x Time x Channel""" - - # if key_value_states are provided this layer is used as a cross-attention layer - # for the decoder - is_cross_attention = key_value_states is not None - bsz, tgt_len, embed_dim = shape_list(hidden_states) - - # get query proj - query_states = self.q_proj(hidden_states) * self.scaling - # get key, value proj - if is_cross_attention and past_key_value is not None: - # reuse k,v, cross_attentions - key_states = past_key_value[0] - value_states = past_key_value[1] - elif is_cross_attention: - # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states), -1, bsz) - elif past_key_value is not None: - # reuse k, v, self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - key_states = tf.concat([past_key_value[0], key_states], axis=2) - value_states = tf.concat([past_key_value[1], value_states], axis=2) - else: - # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - if self.is_decoder: - # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. 
- # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of - # all previous decoder key/value_states. Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - past_key_value = (key_states, value_states) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) - key_states = tf.reshape(key_states, proj_shape) - value_states = tf.reshape(value_states, proj_shape) - - src_len = shape_list(key_states)[1] - attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - - tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], - message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" - ), - ) - - if attention_mask is not None: - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) - attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask - attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) - - attn_weights = stable_softmax(attn_weights, axis=-1) - - if layer_head_mask is not None: - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) - - attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( - attn_weights, (bsz, self.num_heads, tgt_len, src_len) - ) - attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) - - attn_probs = self.dropout(attn_weights, training=training) - attn_output = tf.matmul(attn_probs, value_states) - - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) - - attn_output = tf.transpose( - tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) - ) - attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) - - attn_output = self.out_proj(attn_output) - attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) - - return attn_output, attn_weights, past_key_value - - -class TFBartEncoderLayer(tf.keras.layers.Layer): - def __init__(self, config: BartConfig, **kwargs): - super().__init__(**kwargs) - self.embed_dim = config.d_model - self.self_attn = TFBartAttention( - self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" - ) - self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.activation_fn = get_tf_activation(config.activation_function) - self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) - self.fc1 = 
tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") - self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") - self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") - - def call( - self, - hidden_states: tf.Tensor, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]], - layer_head_mask: Optional[tf.Tensor], - training: Optional[bool] = False, - ) -> tf.Tensor: - """ - Args: - hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` - attention_mask (`tf.Tensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size - `(encoder_attention_heads,)` - """ - residual = hidden_states - hidden_states, self_attn_weights, _ = self.self_attn( - hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask - ) - - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) - - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.self_attn_layer_norm(hidden_states) - - residual = hidden_states - hidden_states = self.activation_fn(self.fc1(hidden_states)) - hidden_states = self.activation_dropout(hidden_states, training=training) - hidden_states = self.fc2(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.final_layer_norm(hidden_states) - - return hidden_states, self_attn_weights - - -class TFBartDecoderLayer(tf.keras.layers.Layer): - def __init__(self, config: BartConfig, **kwargs): - super().__init__(**kwargs) - self.embed_dim = config.d_model - self.self_attn = TFBartAttention( - embed_dim=self.embed_dim, - num_heads=config.decoder_attention_heads, - dropout=config.attention_dropout, - name="self_attn", - is_decoder=True, - ) - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.activation_fn = get_tf_activation(config.activation_function) - self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) - - self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") - self.encoder_attn = TFBartAttention( - self.embed_dim, - config.decoder_attention_heads, - dropout=config.attention_dropout, - name="encoder_attn", - is_decoder=True, - ) - self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") - self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") - self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") - self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") - - def call( - self, - hidden_states: tf.Tensor, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - layer_head_mask: Optional[tf.Tensor] = None, - cross_attn_layer_head_mask: Optional[tf.Tensor] = None, - past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, - training: Optional[bool] = False, - ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: - """ - Args: - hidden_states (`tf.Tensor`): input to the 
layer of shape `(seq_len, batch, embed_dim)` - attention_mask (`tf.Tensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - encoder_hidden_states (`tf.Tensor`): - cross attention input to the layer of shape `(seq_len, batch, embed_dim)` - encoder_attention_mask (`tf.Tensor`): encoder attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size - `(decoder_attention_heads,)` - cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. - `(decoder_attention_heads,)` - past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states - """ - residual = hidden_states - - # Self Attention - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - # add present self-attn cache to positions 1,2 of present_key_value tuple - hidden_states, self_attn_weights, present_key_value = self.self_attn( - hidden_states=hidden_states, - past_key_value=self_attn_past_key_value, - attention_mask=attention_mask, - layer_head_mask=layer_head_mask, - ) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.self_attn_layer_norm(hidden_states) - - # Cross-Attention Block - cross_attn_present_key_value = None - cross_attn_weights = None - if encoder_hidden_states is not None: - residual = hidden_states - - # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple - cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None - hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( - hidden_states=hidden_states, - key_value_states=encoder_hidden_states, - attention_mask=encoder_attention_mask, - layer_head_mask=cross_attn_layer_head_mask, - past_key_value=cross_attn_past_key_value, - ) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.encoder_attn_layer_norm(hidden_states) - - # add cross-attn to positions 3,4 of present_key_value tuple - present_key_value = present_key_value + cross_attn_present_key_value - - # Fully Connected - residual = hidden_states - hidden_states = self.activation_fn(self.fc1(hidden_states)) - hidden_states = self.activation_dropout(hidden_states, training=training) - hidden_states = self.fc2(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - hidden_states = residual + hidden_states - hidden_states = self.final_layer_norm(hidden_states) - - return ( - hidden_states, - self_attn_weights, - cross_attn_weights, - present_key_value, - ) - - -class TFBartClassificationHead(tf.keras.layers.Layer): - """Head for sentence-level classification tasks.""" - - def __init__(self, inner_dim: int, num_classes: int, pooler_dropout: float, name: str, **kwargs): - super().__init__(name=name, **kwargs) - self.dense = tf.keras.layers.Dense(inner_dim, name="dense") - self.dropout = tf.keras.layers.Dropout(pooler_dropout) - self.out_proj = tf.keras.layers.Dense(num_classes, name="out_proj") - - def call(self, inputs): - hidden_states = self.dropout(inputs) - hidden_states = self.dense(hidden_states) - hidden_states = tf.keras.activations.tanh(hidden_states) - 
hidden_states = self.dropout(hidden_states) - hidden_states = self.out_proj(hidden_states) - return hidden_states - - -class TFBartPretrainedModel(TFPreTrainedModel): - config_class = BartConfig - base_model_prefix = "model" - - @property - def dummy_inputs(self): - pad_token = 1 - input_ids = tf.convert_to_tensor(DUMMY_INPUTS, dtype=tf.int32) - decoder_input_ids = tf.convert_to_tensor(DUMMY_INPUTS, dtype=tf.int32) - dummy_inputs = { - "decoder_input_ids": decoder_input_ids, - "attention_mask": tf.cast(input_ids != pad_token, tf.int32), - "input_ids": input_ids, - } - return dummy_inputs - - @tf.function( - input_signature=[ - { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), - "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), - } - ] - ) - def serving(self, inputs): - output = self.call(inputs) - - return self.serving_output(output) - - -BART_START_DOCSTRING = r""" - This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it - as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and - behavior. - - - - TensorFlow models and layers in `transformers` accept two formats as input: - - - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional argument. - - The reason the second format is supported is that Keras methods prefer this format when passing inputs to models - and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just - pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second - format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with - the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first - positional argument: - - - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` - - Note that when creating models and layers with - [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry - about any of this, as you can just pass inputs like you would to any other Python function! - - - - Args: - config ([`BartConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
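The three calling conventions described in this docstring look roughly like the sketch below; `facebook/bart-base` is simply an assumed example checkpoint.

```python
from transformers import AutoTokenizer, TFBartModel

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = TFBartModel.from_pretrained("facebook/bart-base")
enc = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")

# 1) keyword arguments, like a PyTorch model
out_kwargs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])

# 2) a list in the order given by the docstring
out_list = model([enc["input_ids"], enc["attention_mask"]])

# 3) a dict keyed by the input names
out_dict = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
```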
-""" - - -BART_GENERATION_EXAMPLE = r""" - Summarization example: - - ```python - >>> from transformers import AutoTokenizer, TFBartForConditionalGeneration - - >>> model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large") - >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large") - - >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." - >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="tf") - - >>> # Generate Summary - >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5) - >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)) - ``` - - Mask filling example: - - ```python - >>> from transformers import AutoTokenizer, TFBartForConditionalGeneration - - >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large") - >>> TXT = "My friends are but they eat too many carbs." - - >>> model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-large") - >>> input_ids = tokenizer([TXT], return_tensors="tf")["input_ids"] - >>> logits = model(input_ids).logits - >>> probs = tf.nn.softmax(logits[0]) - >>> # probs[5] is associated with the mask token - ``` -""" - - -BART_INPUTS_DOCSTRING = r""" - Args: - input_ids (`tf.Tensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`tf.Tensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): - Indices of decoder input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are decoder input IDs?](../glossary#decoder-input-ids) - - Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` - is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). - - For translation and summarization training, `decoder_input_ids` should be provided. If no - `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right - for denoising pre-training following the paper. - decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): - will be made by default and ignore pad tokens. It is not recommended to set this for most use cases. - decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the - range `[0, config.max_position_embeddings - 1]`. - head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. 
- - decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - encoder_outputs (`tf.FloatTensor`, *optional*): - hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. - of shape `(batch_size, sequence_length, hidden_size)` is a sequence of - past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) - contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*, defaults to `True`): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). Set to `False` during training, `True` during generation - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the - config will be used instead. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. This argument can be used only in eager mode, in graph mode the value in the config will be - used instead. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in - eager mode, in graph mode the value will always be set to True. - training (`bool`, *optional*, defaults to `False`): - Whether or not to use the model in training mode (some modules like dropout modules have different - behaviors between training and evaluation). -""" - - -@keras_serializable -class TFBartEncoder(tf.keras.layers.Layer): - config_class = BartConfig - """ - Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a - [`TFBartEncoderLayer`]. 
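Before moving on to the encoder, here is a rough sketch of the `past_key_values` / `use_cache` pattern that the inputs docstring above describes: after the first step, only the newest decoder token is fed together with the cached states. The checkpoint name and the greedy next-token pick are assumptions made for illustration only.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFBartForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = TFBartForConditionalGeneration.from_pretrained("facebook/bart-base")

enc = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]])

# First step: full forward pass, asking for the key/value cache.
out = model(input_ids=enc["input_ids"], decoder_input_ids=decoder_input_ids, use_cache=True)
next_token = tf.argmax(out.logits[:, -1, :], axis=-1, output_type=tf.int32)[:, None]

# Later steps: pass only the newest token plus past_key_values instead of the full prefix.
out = model(
    input_ids=enc["input_ids"],
    decoder_input_ids=next_token,
    past_key_values=out.past_key_values,
    use_cache=True,
)
```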
- - Args: - config: BartConfig - """ - - def __init__(self, config: BartConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): - super().__init__(**kwargs) - self.config = config - self.dropout = tf.keras.layers.Dropout(config.dropout) - self.layerdrop = config.encoder_layerdrop - self.padding_idx = config.pad_token_id - self.max_source_positions = config.max_position_embeddings - self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 - - self.embed_tokens = embed_tokens - self.embed_positions = TFBartLearnedPositionalEmbedding( - config.max_position_embeddings, - config.d_model, - name="embed_positions", - ) - self.layers = [TFBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] - self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") - - @unpack_inputs - def call( - self, - input_ids: Optional[TFModelInputType] = None, - inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: - """ - Args: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, `optional): - Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
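The encoder can also be exercised on its own and its output fed back in as `encoder_outputs`, which avoids re-encoding the source across decoder passes. A small sketch under the same assumed `facebook/bart-base` checkpoint:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFBartModel

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = TFBartModel.from_pretrained("facebook/bart-base")

enc = tokenizer("BART encodes this sentence.", return_tensors="tf")

# Run the encoder once ...
encoder_outputs = model.get_encoder()(
    input_ids=enc["input_ids"], attention_mask=enc["attention_mask"]
)
print(encoder_outputs.last_hidden_state.shape)  # (batch, seq_len, d_model), e.g. (1, 8, 768)

# ... then reuse it for decoder passes without re-encoding.
decoder_input_ids = tf.constant([[model.config.decoder_start_token_id]])
out = model(
    attention_mask=enc["attention_mask"],
    decoder_input_ids=decoder_input_ids,
    encoder_outputs=encoder_outputs,
)
```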
- """ - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - input_shape = shape_list(input_ids) - elif inputs_embeds is not None: - input_shape = shape_list(inputs_embeds)[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - if inputs_embeds is None: - # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name - # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` - # is used with a name ending in `/`, that name replaces the current name scope. - # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) - context = [] - if hasattr(self.embed_tokens, "load_weight_prefix"): - context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) - with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. - tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - - embed_pos = self.embed_positions(input_shape) - hidden_states = inputs_embeds + embed_pos - hidden_states = self.layernorm_embedding(hidden_states) - hidden_states = self.dropout(hidden_states, training=training) - - # check attention mask and invert - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask(attention_mask) - else: - attention_mask = None - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - # check if head_mask has a correct number of layers specified if desired - if head_mask is not None: - tf.debugging.assert_equal( - shape_list(head_mask)[0], - len(self.layers), - message=( - f"The head_mask should be specified for {len(self.layers)} layers, but it is for" - f" {shape_list(head_mask)[0]}." - ), - ) - - # encoder layers - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - dropout_probability = random.uniform(0, 1) - if training and (dropout_probability < self.layerdrop): # skip the layer - continue - - hidden_states, attn = encoder_layer( - hidden_states, - attention_mask, - head_mask[idx] if head_mask is not None else None, - ) - - if output_attentions: - all_attentions += (attn,) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return TFBaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) - - -@keras_serializable -class TFBartDecoder(tf.keras.layers.Layer): - config_class = BartConfig - """ - Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFBartDecoderLayer`] - - Args: - config: BartConfig - embed_tokens: output embedding - """ - - def __init__(self, config: BartConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): - super().__init__(**kwargs) - self.config = config - self.padding_idx = config.pad_token_id - self.embed_tokens = embed_tokens - self.layerdrop = config.decoder_layerdrop - self.embed_positions = TFBartLearnedPositionalEmbedding( - config.max_position_embeddings, - config.d_model, - name="embed_positions", - ) - self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 - self.layers = [TFBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] - self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") - - self.dropout = tf.keras.layers.Dropout(config.dropout) - - @unpack_inputs - def call( - self, - input_ids: Optional[TFModelInputType] = None, - inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: - r""" - Args: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): - Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you - provide it. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the - range `[0, config.max_position_embeddings - 1]`. - encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): - Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention - of the decoder. - encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): - Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values - selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the attention modules. 
Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): - Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up - decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those - that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of - all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape - `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` - you can choose to directly pass an embedded representation. This is useful if you want more control - over how to convert `input_ids` indices into associated vectors than the model's internal embedding - lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. - """ - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") - elif input_ids is not None: - input_shape = shape_list(input_ids) - elif inputs_embeds is not None: - input_shape = shape_list(inputs_embeds)[:-1] - else: - raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") - - past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 - - # embed positions - if position_ids is None: - positions = self.embed_positions(input_shape, past_key_values_length) - else: - positions = self.embed_positions(input_shape, position_ids=position_ids) - - if inputs_embeds is None: - # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name - # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` - # is used with a name ending in `/`, that name replaces the current name scope. - # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) - context = [] - if hasattr(self.embed_tokens, "load_weight_prefix"): - context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/")) - with ContextManagers(context): - # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound - # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
- tf.debugging.assert_less( - input_ids, - tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), - message=( - "input_ids must be smaller than the embedding layer's input dimension (got" - f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" - ), - ) - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - - hidden_states = inputs_embeds - - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - if input_shape[-1] > 1: - combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) - else: - combined_attention_mask = _expand_mask( - tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] - ) - - if attention_mask is not None: - combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) - - if encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) - - hidden_states = self.layernorm_embedding(hidden_states + positions) - hidden_states = self.dropout(hidden_states, training=training) - - # decoder layers - all_hidden_states = () if output_hidden_states else None - all_self_attns = () if output_attentions else None - all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None - present_key_values = () if use_cache else None - - # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None: - tf.debugging.assert_equal( - shape_list(attn_mask)[0], - len(self.layers), - message=( - f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" - f" {shape_list(attn_mask)[0]}." 
- ), - ) - - for idx, decoder_layer in enumerate(self.layers): - # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) - if output_hidden_states: - all_hidden_states += (hidden_states,) - - dropout_probability = random.uniform(0, 1) - - if training and (dropout_probability < self.layerdrop): - continue - - past_key_value = past_key_values[idx] if past_key_values is not None else None - - hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( - hidden_states, - attention_mask=combined_attention_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - layer_head_mask=head_mask[idx] if head_mask is not None else None, - cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, - past_key_value=past_key_value, - ) - - if use_cache: - present_key_values += (present_key_value,) - - if output_attentions: - all_self_attns += (layer_self_attn,) - - if encoder_hidden_states is not None: - all_cross_attns += (layer_cross_attn,) - - if output_hidden_states: - all_hidden_states += (hidden_states,) - - if not return_dict: - return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns - else: - return TFBaseModelOutputWithPastAndCrossAttentions( - last_hidden_state=hidden_states, - past_key_values=present_key_values, - hidden_states=all_hidden_states, - attentions=all_self_attns, - cross_attentions=all_cross_attns, - ) - - -@keras_serializable -class TFBartMainLayer(tf.keras.layers.Layer): - config_class = BartConfig - - def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs): - super().__init__(**kwargs) - self.config = config - self.shared = tf.keras.layers.Embedding( - input_dim=config.vocab_size, - output_dim=config.d_model, - embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), - name="model.shared", - ) - # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) - self.shared.load_weight_prefix = "model.shared" if load_weight_prefix is None else load_weight_prefix - - self.encoder = TFBartEncoder(config, self.shared, name="encoder") - self.decoder = TFBartDecoder(config, self.shared, name="decoder") - - def get_input_embeddings(self): - return self.shared - - def set_input_embeddings(self, new_embeddings): - self.shared = new_embeddings - self.encoder.embed_tokens = self.shared - self.decoder.embed_tokens = self.shared - - @unpack_inputs - def call( - self, - input_ids: Optional[TFModelInputType] = None, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, - past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, - inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: 
Optional[bool] = False, - **kwargs, - ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]: - # different to other models, Bart automatically creates decoder_input_ids from - # input_ids if no decoder_input_ids are provided - if decoder_input_ids is None and decoder_inputs_embeds is None: - if input_ids is None: - raise ValueError( - "If no `decoder_input_ids` or `decoder_inputs_embeds` are " - "passed, `input_ids` cannot be `None`. Please pass either " - "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." - ) - - decoder_input_ids = shift_tokens_right( - input_ids, self.config.pad_token_id, self.config.decoder_start_token_id - ) - - if encoder_outputs is None: - encoder_outputs = self.encoder( - input_ids=input_ids, - attention_mask=attention_mask, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True - elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): - encoder_outputs = TFBaseModelOutput( - last_hidden_state=encoder_outputs[0], - hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, - attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, - ) - # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False - elif not return_dict and not isinstance(encoder_outputs, tuple): - encoder_outputs = encoder_outputs.to_tuple() - - decoder_outputs = self.decoder( - decoder_input_ids, - attention_mask=decoder_attention_mask, - position_ids=decoder_position_ids, - encoder_hidden_states=encoder_outputs[0], - encoder_attention_mask=attention_mask, - head_mask=decoder_head_mask, - cross_attn_head_mask=cross_attn_head_mask, - past_key_values=past_key_values, - inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - if not return_dict: - return decoder_outputs + encoder_outputs - - return TFSeq2SeqModelOutput( - last_hidden_state=decoder_outputs.last_hidden_state, - past_key_values=decoder_outputs.past_key_values, - decoder_hidden_states=decoder_outputs.hidden_states, - decoder_attentions=decoder_outputs.attentions, - cross_attentions=decoder_outputs.cross_attentions, - encoder_last_hidden_state=encoder_outputs.last_hidden_state, - encoder_hidden_states=encoder_outputs.hidden_states, - encoder_attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings( - "The bare BART Model outputting raw hidden-states without any specific head on top.", - BART_START_DOCSTRING, -) -class TFBartModel(TFBartPretrainedModel): - _requires_load_weight_prefix = True - - def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - - self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model") - - def get_encoder(self): - return self.model.encoder - - def get_decoder(self): - return self.model.decoder - - @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @add_code_sample_docstrings( - checkpoint=_CHECKPOINT_FOR_DOC, - output_type=TFSeq2SeqModelOutput, - config_class=_CONFIG_FOR_DOC, - ) - @unpack_inputs - def call( - self, - input_ids: Optional[TFModelInputType] = None, - 
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, - past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, - inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - training: Optional[bool] = False, - **kwargs, - ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - decoder_input_ids=decoder_input_ids, - decoder_attention_mask=decoder_attention_mask, - decoder_position_ids=decoder_position_ids, - head_mask=head_mask, - decoder_head_mask=decoder_head_mask, - cross_attn_head_mask=cross_attn_head_mask, - encoder_outputs=encoder_outputs, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - decoder_inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - return outputs - - def serving_output(self, output): - pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None - dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None - dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None - cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None - enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None - enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None - - return TFSeq2SeqModelOutput( - last_hidden_state=output.last_hidden_state, - past_key_values=pkv, - decoder_hidden_states=dec_hs, - decoder_attentions=dec_attns, - cross_attentions=cross_attns, - encoder_last_hidden_state=output.encoder_last_hidden_state, - encoder_hidden_states=enc_hs, - encoder_attentions=enc_attns, - ) - - -class BiasLayer(tf.keras.layers.Layer): - """ - Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, - so all weights have to be registered in a layer. - """ - - def __init__(self, shape, initializer, trainable, name, **kwargs): - super().__init__(name=name, **kwargs) - # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of - # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: - # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 - self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) - - def call(self, x): - return x + self.bias - - -@add_start_docstrings( - "The BART Model with a language modeling head. 
Can be used for summarization.", - BART_START_DOCSTRING, -) -class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageModelingLoss): - _keys_to_ignore_on_load_missing = [r"final_logits_bias"] - _requires_load_weight_prefix = True - - def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model") - self.use_cache = config.use_cache - # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.bias_layer = BiasLayer( - name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False - ) - - def get_decoder(self): - return self.model.decoder - - def get_encoder(self): - return self.model.encoder - - def get_output_embeddings(self): - return self.get_input_embeddings() - - def set_output_embeddings(self, value): - self.set_input_embeddings(value) - - def get_bias(self): - return {"final_logits_bias": self.bias_layer.bias} - - def set_bias(self, value): - # Replaces the existing layers containing bias for correct (de)serialization. - vocab_size = value["final_logits_bias"].shape[-1] - self.bias_layer = BiasLayer( - name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False - ) - self.bias_layer.bias.assign(value["final_logits_bias"]) - - @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) - @add_end_docstrings(BART_GENERATION_EXAMPLE) - @unpack_inputs - def call( - self, - input_ids: Optional[TFModelInputType] = None, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_outputs: Optional[TFBaseModelOutput] = None, - past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, - inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: Optional[tf.Tensor] = None, - training: Optional[bool] = False, - ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., - config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored - (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
- - Returns: - - """ - - if labels is not None: - labels = tf.where( - labels == self.config.pad_token_id, - tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), - labels, - ) - use_cache = False - if decoder_input_ids is None and decoder_inputs_embeds is None: - decoder_input_ids = shift_tokens_right( - labels, self.config.pad_token_id, self.config.decoder_start_token_id - ) - - outputs = self.model( - input_ids, - attention_mask=attention_mask, - decoder_input_ids=decoder_input_ids, - encoder_outputs=encoder_outputs, - decoder_attention_mask=decoder_attention_mask, - decoder_position_ids=decoder_position_ids, - head_mask=head_mask, - decoder_head_mask=decoder_head_mask, - cross_attn_head_mask=cross_attn_head_mask, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - decoder_inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) - lm_logits = self.bias_layer(lm_logits) - masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) - - if not return_dict: - output = (lm_logits,) + outputs[1:] - return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output - return TFSeq2SeqLMOutput( - loss=masked_lm_loss, - logits=lm_logits, - past_key_values=outputs.past_key_values, # index 1 of d outputs - decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs - decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs - cross_attentions=outputs.cross_attentions, # index 4 of d outputs - encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs - encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out - encoder_attentions=outputs.encoder_attentions, # 2 of e out - ) - - def serving_output(self, output): - pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None - dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None - dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None - cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None - enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None - enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None - - return TFSeq2SeqLMOutput( - logits=output.logits, - past_key_values=pkv, - decoder_hidden_states=dec_hs, - decoder_attentions=dec_attns, - cross_attentions=cross_attns, - encoder_last_hidden_state=output.encoder_last_hidden_state, - encoder_hidden_states=enc_hs, - encoder_attentions=enc_attns, - ) - - def prepare_inputs_for_generation( - self, - decoder_input_ids, - past_key_values=None, - attention_mask=None, - decoder_attention_mask=None, - head_mask=None, - decoder_head_mask=None, - cross_attn_head_mask=None, - use_cache=None, - encoder_outputs=None, - **kwargs, - ): - # cut decoder_input_ids if past_key_values is used - if past_key_values is not None: - decoder_input_ids = decoder_input_ids[:, -1:] - - if decoder_attention_mask is not None: # xla - decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] - elif past_key_values is not None: # no xla + past_key_values - decoder_position_ids = 
past_key_values[0][0].shape[2] - else: # no xla + no past_key_values - decoder_position_ids = tf.range(decoder_input_ids.shape[1]) - - return { - "input_ids": None, # encoder_outputs is defined. input_ids not needed - "encoder_outputs": encoder_outputs, - "past_key_values": past_key_values, - "decoder_input_ids": decoder_input_ids, - "attention_mask": attention_mask, - "decoder_attention_mask": decoder_attention_mask, - "decoder_position_ids": decoder_position_ids, - "head_mask": head_mask, - "decoder_head_mask": decoder_head_mask, - "cross_attn_head_mask": cross_attn_head_mask, - "use_cache": use_cache, # change this to avoid caching (presumably for debugging) - } - - def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor): - return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) - - -@add_start_docstrings( - """ - Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE - tasks. - """, - BART_START_DOCSTRING, -) -class TFBartForSequenceClassification(TFBartPretrainedModel, TFSequenceClassificationLoss): - @property - def dummy_inputs(self): - pad_token = self.config.pad_token_id - input_ids = tf.constant([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]]) - dummy_inputs = { - "attention_mask": tf.cast(tf.math.not_equal(input_ids, (pad_token)), dtype=tf.int32), - "input_ids": input_ids, - } - return dummy_inputs - - def __init__(self, config: BartConfig, load_weight_prefix=None, *inputs, **kwargs): - super().__init__(config, *inputs, **kwargs) - self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model") - self.classification_head = TFBartClassificationHead( - config.d_model, config.num_labels, config.classifier_dropout, name="classification_head" - ) - - @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=TFSeq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) - @unpack_inputs - def call( - self, - input_ids: Optional[TFModelInputType] = None, - attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, - head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, - encoder_outputs: Optional[TFBaseModelOutput] = None, - past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, - inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - labels: Optional[tf.Tensor] = None, - training: Optional[bool] = False, - ) -> Union[TFSeq2SeqSequenceClassifierOutput, Tuple[tf.Tensor]]: - r""" - labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., - config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
- - Returns: - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if labels is not None: - use_cache = False - - if input_ids is None and inputs_embeds is not None: - raise NotImplementedError( - f"Passing input embeddings is currently not supported for {self.__class__.__name__}" - ) - - outputs = self.model( - input_ids=input_ids, - attention_mask=attention_mask, - decoder_input_ids=decoder_input_ids, - decoder_attention_mask=decoder_attention_mask, - decoder_position_ids=decoder_position_ids, - head_mask=head_mask, - decoder_head_mask=decoder_head_mask, - cross_attn_head_mask=cross_attn_head_mask, - encoder_outputs=encoder_outputs, - past_key_values=past_key_values, - inputs_embeds=inputs_embeds, - decoder_inputs_embeds=decoder_inputs_embeds, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - training=training, - ) - - last_hidden_state = outputs[0] - eos_mask = tf.equal(input_ids, self.config.eos_token_id) - # out the rows with False where present. Then verify all the final - # entries are True - self_masked = tf.reshape(tf.boolean_mask(eos_mask, eos_mask), (tf.shape(input_ids)[0], -1)) - tf.Assert(tf.reduce_all(self_masked[:, -1]), ["All examples must have the same number of tokens."]) - - masked = tf.reshape( - tf.boolean_mask(last_hidden_state, eos_mask), - (tf.shape(input_ids)[0], tf.shape(self_masked)[1], tf.shape(last_hidden_state)[-1]), - ) - - sentence_representation = masked[:, -1, :] - logits = self.classification_head(sentence_representation) - loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) - - if not return_dict: - output = (logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return TFSeq2SeqSequenceClassifierOutput( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - decoder_hidden_states=outputs.decoder_hidden_states, - decoder_attentions=outputs.decoder_attentions, - cross_attentions=outputs.cross_attentions, - encoder_last_hidden_state=outputs.encoder_last_hidden_state, - encoder_hidden_states=outputs.encoder_hidden_states, - encoder_attentions=outputs.encoder_attentions, - ) - - def serving_output(self, output): - logits = tf.convert_to_tensor(output.logits) - pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None - dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None - dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None - cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None - enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None - enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None - - return TFSeq2SeqSequenceClassifierOutput( - logits=logits, - past_key_values=pkv, - decoder_hidden_states=dec_hs, - decoder_attentions=dec_attns, - cross_attentions=cross_attns, - encoder_last_hidden_state=output.encoder_last_hidden_state, - encoder_hidden_states=enc_hs, - encoder_attentions=enc_attns, - ) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cymem/about.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cymem/about.py deleted file mode 100644 index 
880458e8dfd0f266876a40a4790d84a19368f743..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cymem/about.py +++ /dev/null @@ -1,7 +0,0 @@ -__title__ = "cymem" -__version__ = "2.0.7" -__summary__ = "Manage calls to calloc/free through Cython" -__uri__ = "https://github.com/explosion/cymem" -__author__ = "Matthew Honnibal" -__email__ = "matt@explosion.ai" -__license__ = "MIT" diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fastapi/security/http.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fastapi/security/http.py deleted file mode 100644 index 8fc0aafd9fb1c1642970f71231be593361260268..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fastapi/security/http.py +++ /dev/null @@ -1,165 +0,0 @@ -import binascii -from base64 import b64decode -from typing import Optional - -from fastapi.exceptions import HTTPException -from fastapi.openapi.models import HTTPBase as HTTPBaseModel -from fastapi.openapi.models import HTTPBearer as HTTPBearerModel -from fastapi.security.base import SecurityBase -from fastapi.security.utils import get_authorization_scheme_param -from pydantic import BaseModel -from starlette.requests import Request -from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_403_FORBIDDEN - - -class HTTPBasicCredentials(BaseModel): - username: str - password: str - - -class HTTPAuthorizationCredentials(BaseModel): - scheme: str - credentials: str - - -class HTTPBase(SecurityBase): - def __init__( - self, - *, - scheme: str, - scheme_name: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model = HTTPBaseModel(scheme=scheme, description=description) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__( - self, request: Request - ) -> Optional[HTTPAuthorizationCredentials]: - authorization = request.headers.get("Authorization") - scheme, credentials = get_authorization_scheme_param(authorization) - if not (authorization and scheme and credentials): - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) - - -class HTTPBasic(HTTPBase): - def __init__( - self, - *, - scheme_name: Optional[str] = None, - realm: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model = HTTPBaseModel(scheme="basic", description=description) - self.scheme_name = scheme_name or self.__class__.__name__ - self.realm = realm - self.auto_error = auto_error - - async def __call__( # type: ignore - self, request: Request - ) -> Optional[HTTPBasicCredentials]: - authorization = request.headers.get("Authorization") - scheme, param = get_authorization_scheme_param(authorization) - if self.realm: - unauthorized_headers = {"WWW-Authenticate": f'Basic realm="{self.realm}"'} - else: - unauthorized_headers = {"WWW-Authenticate": "Basic"} - if not authorization or scheme.lower() != "basic": - if self.auto_error: - raise HTTPException( - status_code=HTTP_401_UNAUTHORIZED, - detail="Not authenticated", - headers=unauthorized_headers, - ) - else: - return None - invalid_user_credentials_exc = HTTPException( - status_code=HTTP_401_UNAUTHORIZED, - detail="Invalid authentication credentials", - headers=unauthorized_headers, 
- ) - try: - data = b64decode(param).decode("ascii") - except (ValueError, UnicodeDecodeError, binascii.Error): - raise invalid_user_credentials_exc - username, separator, password = data.partition(":") - if not separator: - raise invalid_user_credentials_exc - return HTTPBasicCredentials(username=username, password=password) - - -class HTTPBearer(HTTPBase): - def __init__( - self, - *, - bearerFormat: Optional[str] = None, - scheme_name: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model = HTTPBearerModel(bearerFormat=bearerFormat, description=description) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__( - self, request: Request - ) -> Optional[HTTPAuthorizationCredentials]: - authorization = request.headers.get("Authorization") - scheme, credentials = get_authorization_scheme_param(authorization) - if not (authorization and scheme and credentials): - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - if scheme.lower() != "bearer": - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, - detail="Invalid authentication credentials", - ) - else: - return None - return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) - - -class HTTPDigest(HTTPBase): - def __init__( - self, - *, - scheme_name: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model = HTTPBaseModel(scheme="digest", description=description) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__( - self, request: Request - ) -> Optional[HTTPAuthorizationCredentials]: - authorization = request.headers.get("Authorization") - scheme, credentials = get_authorization_scheme_param(authorization) - if not (authorization and scheme and credentials): - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - if scheme.lower() != "digest": - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, - detail="Invalid authentication credentials", - ) - return HTTPAuthorizationCredentials(scheme=scheme, credentials=credentials) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py deleted file mode 100644 index de862b111d8ffa5141c8ace34849193e105d6460..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fsspec/implementations/libarchive.py +++ /dev/null @@ -1,217 +0,0 @@ -from __future__ import absolute_import, division, print_function - -from contextlib import contextmanager -from ctypes import ( - CFUNCTYPE, - POINTER, - c_int, - c_longlong, - c_void_p, - cast, - create_string_buffer, -) - -import libarchive -import libarchive.ffi as ffi - -from fsspec import open_files -from fsspec.archive import AbstractArchiveFileSystem -from fsspec.implementations.memory import MemoryFile -from fsspec.utils import DEFAULT_BLOCK_SIZE - -# Libarchive requires seekable files or memory only for certain archive -# types. However, since we read the directory first to cache the contents -# and also allow random access to any file, the file-like object needs -# to be seekable no matter what. 
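The module comment above stresses that, because the directory listing is read first and any member can then be accessed at random, the underlying file object has to be seekable. A minimal sketch of meeting that requirement by buffering a source into memory; the `example.tar` path and the `payload` name are hypothetical, not part of the original module:

```python
import io

# Any non-seekable source (a socket, a streaming HTTP response, ...) can be
# buffered into memory first: io.BytesIO supports both readinto() and seek(),
# which is what the read and seek callbacks defined below rely on.
with open("example.tar", "rb") as src:   # hypothetical local archive
    payload = src.read()

seekable_fo = io.BytesIO(payload)
seekable_fo.seek(0)                      # rewind before handing it to the archive reader
```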
- -# Seek call-backs (not provided in the libarchive python wrapper) -SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int) -read_set_seek_callback = ffi.ffi( - "read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int -) -new_api = hasattr(ffi, "NO_OPEN_CB") - - -@contextmanager -def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size): - """Read an archive from a seekable file-like object. - - The `file` object must support the standard `readinto` and 'seek' methods. - """ - buf = create_string_buffer(block_size) - buf_p = cast(buf, c_void_p) - - def read_func(archive_p, context, ptrptr): - # readinto the buffer, returns number of bytes read - length = file.readinto(buf) - # write the address of the buffer into the pointer - ptrptr = cast(ptrptr, POINTER(c_void_p)) - ptrptr[0] = buf_p - # tell libarchive how much data was written into the buffer - return length - - def seek_func(archive_p, context, offset, whence): - file.seek(offset, whence) - # tell libarchvie the current position - return file.tell() - - read_cb = ffi.READ_CALLBACK(read_func) - seek_cb = SEEK_CALLBACK(seek_func) - - if new_api: - open_cb = ffi.NO_OPEN_CB - close_cb = ffi.NO_CLOSE_CB - else: - open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB) - close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB) - - with libarchive.read.new_archive_read(format_name, filter_name) as archive_p: - read_set_seek_callback(archive_p, seek_cb) - ffi.read_open(archive_p, None, open_cb, read_cb, close_cb) - yield libarchive.read.ArchiveRead(archive_p) - - -class LibArchiveFileSystem(AbstractArchiveFileSystem): - """Compressed archives as a file-system (read-only) - - Supports the following formats: - tar, pax , cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar - Microsoft CAB, 7-Zip, WARC - - See the libarchive documentation for further restrictions. - https://www.libarchive.org/ - - Keeps file object open while instance lives. It only works in seekable - file-like objects. In case the filesystem does not support this kind of - file object, it is recommended to cache locally. - - This class is pickleable, but not necessarily thread-safe (depends on the - platform). See libarchive documentation for details. - """ - - root_marker = "" - protocol = "libarchive" - cachable = False - - def __init__( - self, - fo="", - mode="r", - target_protocol=None, - target_options=None, - block_size=DEFAULT_BLOCK_SIZE, - **kwargs, - ): - """ - Parameters - ---------- - fo: str or file-like - Contains ZIP, and must exist. If a str, will fetch file using - :meth:`~fsspec.open_files`, which must return one file exactly. - mode: str - Currently, only 'r' accepted - target_protocol: str (optional) - If ``fo`` is a string, this value can be used to override the - FS protocol inferred from a URL - target_options: dict (optional) - Kwargs passed when instantiating the target FS, if ``fo`` is - a string. 
- """ - super().__init__(self, **kwargs) - if mode != "r": - raise ValueError("Only read from archive files accepted") - if isinstance(fo, str): - files = open_files(fo, protocol=target_protocol, **(target_options or {})) - if len(files) != 1: - raise ValueError( - 'Path "{}" did not resolve to exactly' - 'one file: "{}"'.format(fo, files) - ) - fo = files[0] - self.of = fo - self.fo = fo.__enter__() # the whole instance is a context - self.block_size = block_size - self.dir_cache = None - - @contextmanager - def _open_archive(self): - self.fo.seek(0) - with custom_reader(self.fo, block_size=self.block_size) as arc: - yield arc - - @classmethod - def _strip_protocol(cls, path): - # file paths are always relative to the archive root - return super()._strip_protocol(path).lstrip("/") - - def _get_dirs(self): - fields = { - "name": "pathname", - "size": "size", - "created": "ctime", - "mode": "mode", - "uid": "uid", - "gid": "gid", - "mtime": "mtime", - } - - if self.dir_cache is not None: - return - - self.dir_cache = {} - list_names = [] - with self._open_archive() as arc: - for entry in arc: - if not entry.isdir and not entry.isfile: - # Skip symbolic links, fifo entries, etc. - continue - self.dir_cache.update( - { - dirname - + "/": {"name": dirname + "/", "size": 0, "type": "directory"} - for dirname in self._all_dirnames(set(entry.name)) - } - ) - f = {key: getattr(entry, fields[key]) for key in fields} - f["type"] = "directory" if entry.isdir else "file" - list_names.append(entry.name) - - self.dir_cache[f["name"]] = f - # libarchive does not seem to return an entry for the directories (at least - # not in all formats), so get the directories names from the files names - self.dir_cache.update( - { - dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"} - for dirname in self._all_dirnames(list_names) - } - ) - - def _open( - self, - path, - mode="rb", - block_size=None, - autocommit=True, - cache_options=None, - **kwargs, - ): - path = self._strip_protocol(path) - if mode != "rb": - raise NotImplementedError - - data = bytes() - with self._open_archive() as arc: - for entry in arc: - if entry.pathname != path: - continue - - if entry.size == 0: - # empty file, so there are no blocks - break - - for block in entry.get_blocks(entry.size): - data = block - break - else: - raise ValueError - return MemoryFile(fs=self, path=path, data=data) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/pyext/cpp_message.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/pyext/cpp_message.py deleted file mode 100644 index ca290299f1bfa83f80c85d0bf70867a4bf52952e..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/pyext/cpp_message.py +++ /dev/null @@ -1,72 +0,0 @@ -# Protocol Buffers - Google's data interchange format -# Copyright 2008 Google Inc. All rights reserved. -# https://developers.google.com/protocol-buffers/ -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Protocol message implementation hooks for C++ implementation. - -Contains helper functions used to create protocol message classes from -Descriptor objects at runtime backed by the protocol buffer C++ API. -""" - -__author__ = 'tibell@google.com (Johan Tibell)' - -from google.protobuf.internal import api_implementation - - -# pylint: disable=protected-access -_message = api_implementation._c_module -# TODO(jieluo): Remove this import after fix api_implementation -if _message is None: - from google.protobuf.pyext import _message - - -class GeneratedProtocolMessageType(_message.MessageMeta): - - """Metaclass for protocol message classes created at runtime from Descriptors. - - The protocol compiler currently uses this metaclass to create protocol - message classes at runtime. Clients can also manually create their own - classes at runtime, as in this example: - - mydescriptor = Descriptor(.....) - factory = symbol_database.Default() - factory.pool.AddDescriptor(mydescriptor) - MyProtoClass = factory.GetPrototype(mydescriptor) - myproto_instance = MyProtoClass() - myproto.foo_field = 23 - ... - - The above example will not work for nested types. If you wish to include them, - use reflection.MakeClass() instead of manually instantiating the class in - order to create the appropriate class structure. - """ - - # Must be consistent with the protocol-compiler code in - # proto2/compiler/internal/generator.*. - _DESCRIPTOR_KEY = 'DESCRIPTOR' diff --git a/spaces/cihyFjudo/fairness-paper-search/Driver Jmicron Usb To Ata Atapi Bridge Frequently Asked Questions and Answers.md b/spaces/cihyFjudo/fairness-paper-search/Driver Jmicron Usb To Ata Atapi Bridge Frequently Asked Questions and Answers.md deleted file mode 100644 index d2ef8ee9804f3341f4e28633a2a71af1b9e4dbb0..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Driver Jmicron Usb To Ata Atapi Bridge Frequently Asked Questions and Answers.md +++ /dev/null @@ -1,5 +0,0 @@ -
-  • To: linux-usb@xxxxxxxxxxxxxxx
-  • Subject: UAS not working with JMS567 based disk enclosure
-  • From: Jack Coulter
-  • Date: Tue, 14 Feb 2017 16:40:14 +1100
-  • User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.7.0
-[Scraped linux-usb mailing-list post, heavily garbled in the original page: on kernel 4.9.8 a JMicron JMS567-based Hotway H82-SU3S2 enclosure is driven by usb-storage instead of uas because its USB descriptors expose only the Bulk-Only interface and no UAS alternate setting, unlike a working ASMedia enclosure whose descriptors include the alternate setting with command, status, and data pipes; the full lsusb output, the OpenPGP header and signature attachment, and ad-script residue are omitted.]
-
-Driver Jmicron Usb To Ata Atapi Bridge
-
-DOWNLOAD ->->->-> https://tinurli.com/2uwkls
-
-aaccfb2cb3
-
-
\ No newline at end of file diff --git a/spaces/clem/dreambooth-pareidolia/train_dreambooth.py b/spaces/clem/dreambooth-pareidolia/train_dreambooth.py deleted file mode 100644 index a496382fbc895961b9902c33a9d5cc926d4fcc8d..0000000000000000000000000000000000000000 --- a/spaces/clem/dreambooth-pareidolia/train_dreambooth.py +++ /dev/null @@ -1,881 +0,0 @@ -import argparse -import itertools -import math -import os -from pathlib import Path -from typing import Optional -import subprocess -import sys -import gc -import random - -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -from torch.utils.data import Dataset - -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import set_seed -from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel -from diffusers.optimization import get_scheduler -from huggingface_hub import HfFolder, Repository, whoami -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import CLIPTextModel, CLIPTokenizer - - -logger = get_logger(__name__) - - -def parse_args(): - parser = argparse.ArgumentParser(description="Simple example of a training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - #required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--instance_data_dir", - type=str, - default=None, - #required=True, - help="A folder containing the training data of instance images.", - ) - parser.add_argument( - "--class_data_dir", - type=str, - default=None, - #required=False, - help="A folder containing the training data of class images.", - ) - parser.add_argument( - "--instance_prompt", - type=str, - default=None, - help="The prompt with identifier specifying the instance", - ) - parser.add_argument( - "--class_prompt", - type=str, - default="", - help="The prompt to specify images in the same class as provided instance images.", - ) - parser.add_argument( - "--with_prior_preservation", - default=False, - action="store_true", - help="Flag to add prior preservation loss.", - ) - parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") - parser.add_argument( - "--num_class_images", - type=int, - default=100, - help=( - "Minimal class images for prior preservation loss. If not have enough images, additional images will be" - " sampled with class_prompt." 
- ), - ) - parser.add_argument( - "--output_dir", - type=str, - default="", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" - ) - parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument( - "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. 
Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default="no", - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose" - "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." - "and an Nvidia Ampere GPU." - ), - ) - - parser.add_argument( - "--save_n_steps", - type=int, - default=1, - help=("Save the model every n global_steps"), - ) - - - parser.add_argument( - "--save_starting_step", - type=int, - default=1, - help=("The step from which it starts saving intermediary checkpoints"), - ) - - parser.add_argument( - "--stop_text_encoder_training", - type=int, - default=1000000, - help=("The step at which the text_encoder is no longer trained"), - ) - - - parser.add_argument( - "--image_captions_filename", - action="store_true", - help="Get captions from filename", - ) - - - parser.add_argument( - "--dump_only_text_encoder", - action="store_true", - default=False, - help="Dump only text encoder", - ) - - parser.add_argument( - "--train_only_unet", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--cache_latents", - action="store_true", - default=False, - help="Train only the unet", - ) - - parser.add_argument( - "--Session_dir", - type=str, - default="", - help="Current session directory", - ) - - - - - parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") - - args = parser.parse_args() - env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) - if env_local_rank != -1 and env_local_rank != args.local_rank: - args.local_rank = env_local_rank - - #if args.instance_data_dir is None: - # raise ValueError("You must specify a train data directory.") - - #if args.with_prior_preservation: - # if args.class_data_dir is None: - # raise ValueError("You must specify a data directory for class images.") - # if args.class_prompt is None: - # raise ValueError("You must specify prompt for class images.") - - return args - - -class DreamBoothDataset(Dataset): - """ - A dataset to prepare the instance and class images with the prompts for fine-tuning the model. - It pre-processes the images and the tokenizes prompts. 
- """ - - def __init__( - self, - instance_data_root, - instance_prompt, - tokenizer, - args, - class_data_root=None, - class_prompt=None, - size=512, - center_crop=False, - ): - self.size = size - self.center_crop = center_crop - self.tokenizer = tokenizer - self.image_captions_filename = None - - self.instance_data_root = Path(instance_data_root) - if not self.instance_data_root.exists(): - raise ValueError("Instance images root doesn't exists.") - - self.instance_images_path = list(Path(instance_data_root).iterdir()) - self.num_instance_images = len(self.instance_images_path) - self.instance_prompt = instance_prompt - self._length = self.num_instance_images - - if args.image_captions_filename: - self.image_captions_filename = True - - if class_data_root is not None: - self.class_data_root = Path(class_data_root) - self.class_data_root.mkdir(parents=True, exist_ok=True) - self.class_images_path = list(self.class_data_root.iterdir()) - random.shuffle(self.class_images_path) - self.num_class_images = len(self.class_images_path) - self._length = max(self.num_class_images, self.num_instance_images) - self.class_prompt = class_prompt - else: - self.class_data_root = None - - self.image_transforms = transforms.Compose( - [ - transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - def __len__(self): - return self._length - - def __getitem__(self, index): - example = {} - path = self.instance_images_path[index % self.num_instance_images] - instance_image = Image.open(path) - if not instance_image.mode == "RGB": - instance_image = instance_image.convert("RGB") - - instance_prompt = self.instance_prompt - - if self.image_captions_filename: - filename = Path(path).stem - pt=''.join([i for i in filename if not i.isdigit()]) - pt=pt.replace("_"," ") - pt=pt.replace("(","") - pt=pt.replace(")","") - pt=pt.replace("-","") - instance_prompt = pt - sys.stdout.write(" " +instance_prompt+" ") - sys.stdout.flush() - - - example["instance_images"] = self.image_transforms(instance_image) - example["instance_prompt_ids"] = self.tokenizer( - instance_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - if self.class_data_root: - class_image = Image.open(self.class_images_path[index % self.num_class_images]) - if not class_image.mode == "RGB": - class_image = class_image.convert("RGB") - example["class_images"] = self.image_transforms(class_image) - example["class_prompt_ids"] = self.tokenizer( - self.class_prompt, - padding="do_not_pad", - truncation=True, - max_length=self.tokenizer.model_max_length, - ).input_ids - - return example - - - -class PromptDataset(Dataset): - "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
- - def __init__(self, prompt, num_samples): - self.prompt = prompt - self.num_samples = num_samples - - def __len__(self): - return self.num_samples - - def __getitem__(self, index): - example = {} - example["prompt"] = self.prompt - example["index"] = index - return example - -class LatentsDataset(Dataset): - def __init__(self, latents_cache, text_encoder_cache): - self.latents_cache = latents_cache - self.text_encoder_cache = text_encoder_cache - - def __len__(self): - return len(self.latents_cache) - - def __getitem__(self, index): - return self.latents_cache[index], self.text_encoder_cache[index] - -def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): - if token is None: - token = HfFolder.get_token() - if organization is None: - username = whoami(token)["name"] - return f"{username}/{model_id}" - else: - return f"{organization}/{model_id}" - -def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict: - """ - Starts from base starting dict and then adds the remaining key values from updater replacing the values from - the first starting/base dict with the second updater dict. - - For later: how does d = {**d1, **d2} replace collision? - - :param starting_dict: - :param updater_dict: - :return: - """ - new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict - new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict - return new_dict - -def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace: - """ - - ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x - :param args1: - :param args2: - :return: - """ - # - the merged args - # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}. - merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2)) - args = argparse.Namespace(**merged_key_values_for_namespace) - return args - -def run_training(args_imported): - args_default = parse_args() - args = merge_args(args_default, args_imported) - print(args) - logging_dir = Path(args.output_dir, args.logging_dir) - i=args.save_starting_step - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with="tensorboard", - logging_dir=logging_dir, - ) - - # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate - # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. - # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. - if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: - raise ValueError( - "Gradient accumulation is not supported when training the text encoder in distributed training. " - "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." 
- ) - - if args.seed is not None: - set_seed(args.seed) - - if args.with_prior_preservation: - class_images_dir = Path(args.class_data_dir) - if not class_images_dir.exists(): - class_images_dir.mkdir(parents=True) - cur_class_images = len(list(class_images_dir.iterdir())) - - if cur_class_images < args.num_class_images: - torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, torch_dtype=torch_dtype - ) - pipeline.set_progress_bar_config(disable=True) - - num_new_images = args.num_class_images - cur_class_images - logger.info(f"Number of class images to sample: {num_new_images}.") - - sample_dataset = PromptDataset(args.class_prompt, num_new_images) - sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) - - sample_dataloader = accelerator.prepare(sample_dataloader) - pipeline.to(accelerator.device) - - for example in tqdm( - sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process - ): - with torch.autocast("cuda"): - images = pipeline(example["prompt"]).images - - for i, image in enumerate(images): - image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg") - - del pipeline - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # Handle the repository creation - if accelerator.is_main_process: - if args.push_to_hub: - if args.hub_model_id is None: - repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) - else: - repo_name = args.hub_model_id - repo = Repository(args.output_dir, clone_from=repo_name) - - with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: - if "step_*" not in gitignore: - gitignore.write("step_*\n") - if "epoch_*" not in gitignore: - gitignore.write("epoch_*\n") - elif args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name) - elif args.pretrained_model_name_or_path: - tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") - - # Load models and create wrapper for stable diffusion - if args.train_only_unet: - if os.path.exists(str(args.output_dir+"/text_encoder_trained")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained") - elif os.path.exists(str(args.output_dir+"/text_encoder")): - text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - else: - text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") - unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") - - vae.requires_grad_(False) - if not args.train_text_encoder: - text_encoder.requires_grad_(False) - - if args.gradient_checkpointing: - unet.enable_gradient_checkpointing() - if args.train_text_encoder: - text_encoder.gradient_checkpointing_enable() - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB 
GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - params_to_optimize = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() - ) - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler") - - train_dataset = DreamBoothDataset( - instance_data_root=args.instance_data_dir, - instance_prompt=args.instance_prompt, - class_data_root=args.class_data_dir if args.with_prior_preservation else None, - class_prompt=args.class_prompt, - tokenizer=tokenizer, - size=args.resolution, - center_crop=args.center_crop, - args=args, - ) - - def collate_fn(examples): - input_ids = [example["instance_prompt_ids"] for example in examples] - pixel_values = [example["instance_images"] for example in examples] - - # Concat class and instance examples for prior preservation. - # We do this to avoid doing two forward passes. - if args.with_prior_preservation: - input_ids += [example["class_prompt_ids"] for example in examples] - pixel_values += [example["class_images"] for example in examples] - - pixel_values = torch.stack(pixel_values) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids - - batch = { - "input_ids": input_ids, - "pixel_values": pixel_values, - } - return batch - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - ) - - if args.train_text_encoder: - unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, text_encoder, optimizer, train_dataloader, lr_scheduler - ) - else: - unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - unet, optimizer, train_dataloader, lr_scheduler - ) - - weight_dtype = torch.float32 - if args.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif args.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move text_encode and vae to gpu. - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. 
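With `--with_prior_preservation` enabled, the `collate_fn` above appends the class examples after the instance examples so that one forward pass covers both halves of the batch. A purely illustrative sketch of that layout, with made-up shapes:

```python
import torch

# Illustrative only: 2 instance images and 2 prior-preservation class images.
instance_pixels = torch.randn(2, 3, 512, 512)
class_pixels = torch.randn(2, 3, 512, 512)

# Instance examples come first, class examples second.
pixel_values = torch.cat([instance_pixels, class_pixels], dim=0)  # shape (4, 3, 512, 512)

# The loss code later recovers the two halves with torch.chunk(..., 2, dim=0).
instance_half, class_half = torch.chunk(pixel_values, 2, dim=0)
```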
- vae.to(accelerator.device, dtype=weight_dtype) - if not args.train_text_encoder: - text_encoder.to(accelerator.device, dtype=weight_dtype) - - - if args.cache_latents: - latents_cache = [] - text_encoder_cache = [] - for batch in tqdm(train_dataloader, desc="Caching latents"): - with torch.no_grad(): - batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype) - batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True) - latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist) - if args.train_text_encoder: - text_encoder_cache.append(batch["input_ids"]) - else: - text_encoder_cache.append(text_encoder(batch["input_ids"])[0]) - train_dataset = LatentsDataset(latents_cache, text_encoder_cache) - train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True) - - del vae - #if not args.train_text_encoder: - # del text_encoder - if torch.cuda.is_available(): - torch.cuda.empty_cache() - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - accelerator.init_trackers("dreambooth", config=vars(args)) - - def bar(prg): - br='|'+'█' * prg + ' ' * (25-prg)+'|' - return br - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - # Only show the progress bar once on each machine. 
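- # (The bar() helper above renders a 25-character text progress bar, e.g. bar(10) returns '|' followed by
- #  10 filled blocks, 15 spaces and a closing '|'; it is used further down to label this tqdm bar via
- #  progress_bar.set_description_str("Progress:" + bar(fll)).)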
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) - global_step = 0 - - for epoch in range(args.num_train_epochs): - unet.train() - if args.train_text_encoder: - text_encoder.train() - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(unet): - # Convert images to latent space - with torch.no_grad(): - if args.cache_latents: - latents_dist = batch[0][0] - else: - latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist - latents = latents_dist.sample() * 0.18215 - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - if(args.cache_latents): - if args.train_text_encoder: - encoder_hidden_states = text_encoder(batch[0][1])[0] - else: - encoder_hidden_states = batch[0][1] - else: - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - # Predict the noise residual - model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - - if args.with_prior_preservation: - # Chunk the noise and model_pred into two parts and compute the loss on each part separately. - model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) - target, target_prior = torch.chunk(target, 2, dim=0) - - # Compute instance loss - loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean() - - # Compute prior loss - prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") - - # Add the prior loss to the instance loss. 
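- # Equivalently, the value computed on the next line is
- #   loss = MSE(model_pred, target) + prior_loss_weight * MSE(model_pred_prior, target_prior)
- # where the first term covers the instance images and the second the class (prior-preservation) images.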
- loss = loss + args.prior_loss_weight * prior_loss - else: - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = ( - itertools.chain(unet.parameters(), text_encoder.parameters()) - if args.train_text_encoder - else unet.parameters() - ) - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad() - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - fll=round((global_step*100)/args.max_train_steps) - fll=round(fll/4) - pr=bar(fll) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - progress_bar.set_description_str("Progress:"+pr) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30: - if accelerator.is_main_process: - print(" " +" Freezing the text_encoder ..."+" ") - frz_dir=args.output_dir + "/text_encoder_frozen" - if os.path.exists(frz_dir): - subprocess.call('rm -r '+ frz_dir, shell=True) - os.mkdir(frz_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(frz_dir) - - if args.save_n_steps >= 200: - if global_step < args.max_train_steps and global_step+1==i: - ckpt_name = "_step_" + str(global_step+1) - save_dir = Path(args.output_dir+ckpt_name) - save_dir=str(save_dir) - save_dir=save_dir.replace(" ", "_") - if not os.path.exists(save_dir): - os.mkdir(save_dir) - inst=save_dir[16:] - inst=inst.replace(" ", "_") - print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt") - # Create the pipeline using the trained modules and save it. - if accelerator.is_main_process: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(save_dir) - frz_dir=args.output_dir + "/text_encoder_frozen" - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True) - subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True) - chkpth=args.Session_dir+"/"+inst+".ckpt" - subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True) - subprocess.call('rm -r '+ save_dir, shell=True) - i=i+args.save_n_steps - - accelerator.wait_for_everyone() - - # Create the pipeline using using the trained modules and save it. 
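- # (Summary of the branches below: --dump_only_text_encoder saves only the trained text encoder to
- #  output_dir/text_encoder_trained; --train_only_unet saves the full pipeline and removes any
- #  text_encoder_trained folder; otherwise the full pipeline is saved and, if a frozen text encoder
- #  exists, its weights are moved back into output_dir/text_encoder.)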
- if accelerator.is_main_process: - if args.dump_only_text_encoder: - txt_dir=args.output_dir + "/text_encoder_trained" - if not os.path.exists(txt_dir): - os.mkdir(txt_dir) - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.text_encoder.save_pretrained(txt_dir) - - elif args.train_only_unet: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - pipeline.save_pretrained(args.output_dir) - txt_dir=args.output_dir + "/text_encoder_trained" - subprocess.call('rm -r '+txt_dir, shell=True) - - else: - pipeline = StableDiffusionPipeline.from_pretrained( - args.pretrained_model_name_or_path, - unet=accelerator.unwrap_model(unet), - text_encoder=accelerator.unwrap_model(text_encoder), - ) - frz_dir=args.output_dir + "/text_encoder_frozen" - pipeline.save_pretrained(args.output_dir) - if args.train_text_encoder and os.path.exists(frz_dir): - subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True) - subprocess.call('rm -r '+ frz_dir, shell=True) - - if args.push_to_hub: - repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) - - accelerator.end_training() - del pipeline - torch.cuda.empty_cache() - gc.collect() -if __name__ == "__main__": - pass - #main() - diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/__init__.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/__init__.py deleted file mode 100644 index 006d5f5598fbeea4278c60fd5c4be44de19d5e00..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/contourpy/__init__.py +++ /dev/null @@ -1,253 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -import numpy as np - -from contourpy._contourpy import ( - ContourGenerator, FillType, LineType, Mpl2005ContourGenerator, Mpl2014ContourGenerator, - SerialContourGenerator, ThreadedContourGenerator, ZInterp, max_threads, -) -from contourpy._version import __version__ -from contourpy.chunk import calc_chunk_sizes -from contourpy.enum_util import as_fill_type, as_line_type, as_z_interp - -if TYPE_CHECKING: - from typing import Any - - from numpy.typing import ArrayLike - - from ._contourpy import CoordinateArray, MaskArray - -__all__ = [ - "__version__", - "contour_generator", - "max_threads", - "FillType", - "LineType", - "ContourGenerator", - "Mpl2005ContourGenerator", - "Mpl2014ContourGenerator", - "SerialContourGenerator", - "ThreadedContourGenerator", - "ZInterp", -] - - -# Simple mapping of algorithm name to class name. -_class_lookup: dict[str, type[ContourGenerator]] = dict( - mpl2005=Mpl2005ContourGenerator, - mpl2014=Mpl2014ContourGenerator, - serial=SerialContourGenerator, - threaded=ThreadedContourGenerator, -) - - -def _remove_z_mask( - z: ArrayLike | np.ma.MaskedArray[Any, Any] | None, -) -> tuple[CoordinateArray, MaskArray | None]: - # Preserve mask if present. 
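- # (For example, a plain ndarray containing np.nan is returned as (data, mask) with True at the invalid
- #  entries, while a fully valid, unmasked input is returned as (data, None).)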
- z_array = np.ma.asarray(z, dtype=np.float64) # type: ignore[no-untyped-call] - z_masked = np.ma.masked_invalid(z_array, copy=False) # type: ignore[no-untyped-call] - - if np.ma.is_masked(z_masked): # type: ignore[no-untyped-call] - mask = np.ma.getmask(z_masked) # type: ignore[no-untyped-call] - else: - mask = None - - return np.ma.getdata(z_masked), mask # type: ignore[no-untyped-call] - - -def contour_generator( - x: ArrayLike | None = None, - y: ArrayLike | None = None, - z: ArrayLike | np.ma.MaskedArray[Any, Any] | None = None, - *, - name: str = "serial", - corner_mask: bool | None = None, - line_type: LineType | str | None = None, - fill_type: FillType | str | None = None, - chunk_size: int | tuple[int, int] | None = None, - chunk_count: int | tuple[int, int] | None = None, - total_chunk_count: int | None = None, - quad_as_tri: bool = False, - z_interp: ZInterp | str | None = ZInterp.Linear, - thread_count: int = 0, -) -> ContourGenerator: - """Create and return a contour generator object. - - The class and properties of the contour generator are determined by the function arguments, - with sensible defaults. - - Args: - x (array-like of shape (ny, nx) or (nx,), optional): The x-coordinates of the ``z`` values. - May be 2D with the same shape as ``z.shape``, or 1D with length ``nx = z.shape[1]``. - If not specified are assumed to be ``np.arange(nx)``. Must be ordered monotonically. - y (array-like of shape (ny, nx) or (ny,), optional): The y-coordinates of the ``z`` values. - May be 2D with the same shape as ``z.shape``, or 1D with length ``ny = z.shape[0]``. - If not specified are assumed to be ``np.arange(ny)``. Must be ordered monotonically. - z (array-like of shape (ny, nx), may be a masked array): The 2D gridded values to calculate - the contours of. May be a masked array, and any invalid values (``np.inf`` or - ``np.nan``) will also be masked out. - name (str): Algorithm name, one of ``"serial"``, ``"threaded"``, ``"mpl2005"`` or - ``"mpl2014"``, default ``"serial"``. - corner_mask (bool, optional): Enable/disable corner masking, which only has an effect if - ``z`` is a masked array. If ``False``, any quad touching a masked point is masked out. - If ``True``, only the triangular corners of quads nearest these points are always masked - out, other triangular corners comprising three unmasked points are contoured as usual. - If not specified, uses the default provided by the algorithm ``name``. - line_type (LineType, optional): The format of contour line data returned from calls to - :meth:`~contourpy.ContourGenerator.lines`. If not specified, uses the default provided - by the algorithm ``name``. - fill_type (FillType, optional): The format of filled contour data returned from calls to - :meth:`~contourpy.ContourGenerator.filled`. If not specified, uses the default provided - by the algorithm ``name``. - chunk_size (int or tuple(int, int), optional): Chunk size in (y, x) directions, or the same - size in both directions if only one value is specified. - chunk_count (int or tuple(int, int), optional): Chunk count in (y, x) directions, or the - same count in both directions if only one value is specified. - total_chunk_count (int, optional): Total number of chunks. - quad_as_tri (bool): Enable/disable treating quads as 4 triangles, default ``False``. - If ``False``, a contour line within a quad is a straight line between points on two of - its edges. 
If ``True``, each full quad is divided into 4 triangles using a virtual point - at the centre (mean x, y of the corner points) and a contour line is piecewise linear - within those triangles. Corner-masked triangles are not affected by this setting, only - full unmasked quads. - z_interp (ZInterp): How to interpolate ``z`` values when determining where contour lines - intersect the edges of quads and the ``z`` values of the central points of quads, - default ``ZInterp.Linear``. - thread_count (int): Number of threads to use for contour calculation, default 0. Threads can - only be used with an algorithm ``name`` that supports threads (currently only - ``name="threaded"``) and there must be at least the same number of chunks as threads. - If ``thread_count=0`` and ``name="threaded"`` then it uses the maximum number of threads - as determined by the C++11 call ``std::thread::hardware_concurrency()``. If ``name`` is - something other than ``"threaded"`` then the ``thread_count`` will be set to ``1``. - - Return: - :class:`~contourpy._contourpy.ContourGenerator`. - - Note: - A maximum of one of ``chunk_size``, ``chunk_count`` and ``total_chunk_count`` may be - specified. - - Warning: - The ``name="mpl2005"`` algorithm does not implement chunking for contour lines. - """ - x = np.asarray(x, dtype=np.float64) - y = np.asarray(y, dtype=np.float64) - z, mask = _remove_z_mask(z) - - # Check arguments: z. - if z.ndim != 2: - raise TypeError(f"Input z must be 2D, not {z.ndim}D") - - if z.shape[0] < 2 or z.shape[1] < 2: - raise TypeError(f"Input z must be at least a (2, 2) shaped array, but has shape {z.shape}") - - ny, nx = z.shape - - # Check arguments: x and y. - if x.ndim != y.ndim: - raise TypeError(f"Number of dimensions of x ({x.ndim}) and y ({y.ndim}) do not match") - - if x.ndim == 0: - x = np.arange(nx, dtype=np.float64) - y = np.arange(ny, dtype=np.float64) - x, y = np.meshgrid(x, y) - elif x.ndim == 1: - if len(x) != nx: - raise TypeError(f"Length of x ({len(x)}) must match number of columns in z ({nx})") - if len(y) != ny: - raise TypeError(f"Length of y ({len(y)}) must match number of rows in z ({ny})") - x, y = np.meshgrid(x, y) - elif x.ndim == 2: - if x.shape != z.shape: - raise TypeError(f"Shapes of x {x.shape} and z {z.shape} do not match") - if y.shape != z.shape: - raise TypeError(f"Shapes of y {y.shape} and z {z.shape} do not match") - else: - raise TypeError(f"Inputs x and y must be None, 1D or 2D, not {x.ndim}D") - - # Check mask shape just in case. - if mask is not None and mask.shape != z.shape: - raise ValueError("If mask is set it must be a 2D array with the same shape as z") - - # Check arguments: name. - if name not in _class_lookup: - raise ValueError(f"Unrecognised contour generator name: {name}") - - # Check arguments: chunk_size, chunk_count and total_chunk_count. - y_chunk_size, x_chunk_size = calc_chunk_sizes( - chunk_size, chunk_count, total_chunk_count, ny, nx) - - cls = _class_lookup[name] - - # Check arguments: corner_mask. - if corner_mask is None: - # Set it to default, which is True if the algorithm supports it. - corner_mask = cls.supports_corner_mask() - elif corner_mask and not cls.supports_corner_mask(): - raise ValueError(f"{name} contour generator does not support corner_mask=True") - - # Check arguments: line_type. 
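- # (line_type may be given either as a LineType enum member or as its string name, e.g.
- #  LineType.Separate or "Separate"; as_line_type() performs the string-to-enum conversion.)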
- if line_type is None: - line_type = cls.default_line_type - else: - line_type = as_line_type(line_type) - - if not cls.supports_line_type(line_type): - raise ValueError(f"{name} contour generator does not support line_type {line_type}") - - # Check arguments: fill_type. - if fill_type is None: - fill_type = cls.default_fill_type - else: - fill_type = as_fill_type(fill_type) - - if not cls.supports_fill_type(fill_type): - raise ValueError(f"{name} contour generator does not support fill_type {fill_type}") - - # Check arguments: quad_as_tri. - if quad_as_tri and not cls.supports_quad_as_tri(): - raise ValueError(f"{name} contour generator does not support quad_as_tri=True") - - # Check arguments: z_interp. - if z_interp is None: - z_interp = ZInterp.Linear - else: - z_interp = as_z_interp(z_interp) - - if z_interp != ZInterp.Linear and not cls.supports_z_interp(): - raise ValueError(f"{name} contour generator does not support z_interp {z_interp}") - - # Check arguments: thread_count. - if thread_count not in (0, 1) and not cls.supports_threads(): - raise ValueError(f"{name} contour generator does not support thread_count {thread_count}") - - # Prepare args and kwargs for contour generator constructor. - args = [x, y, z, mask] - kwargs: dict[str, int | bool | LineType | FillType | ZInterp] = { - "x_chunk_size": x_chunk_size, - "y_chunk_size": y_chunk_size, - } - - if name not in ("mpl2005", "mpl2014"): - kwargs["line_type"] = line_type - kwargs["fill_type"] = fill_type - - if cls.supports_corner_mask(): - kwargs["corner_mask"] = corner_mask - - if cls.supports_quad_as_tri(): - kwargs["quad_as_tri"] = quad_as_tri - - if cls.supports_z_interp(): - kwargs["z_interp"] = z_interp - - if cls.supports_threads(): - kwargs["thread_count"] = thread_count - - # Create contour generator. 
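- # Illustrative usage (assumed example, not part of the original module): a typical call such as
- #   cg = contour_generator(z=[[0.0, 1.0], [2.0, 3.0]], name="serial")
- #   lines = cg.lines(1.5)
- # ends up here with the defaults filled in for the serial algorithm.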
- cont_gen = cls(*args, **kwargs) - - return cont_gen diff --git a/spaces/codeparrot/code-generation-models/app.py b/spaces/codeparrot/code-generation-models/app.py deleted file mode 100644 index 58611756203ad2483bc0e239bd2a7b890c5e623c..0000000000000000000000000000000000000000 --- a/spaces/codeparrot/code-generation-models/app.py +++ /dev/null @@ -1,210 +0,0 @@ -import json -import os -import pandas as pd -import requests -import threading -import streamlit as st -from datasets import load_dataset, load_metric - -MODELS = ["CodeParrot", "InCoder", "CodeGen", "PolyCoder"] -GENERATION_MODELS = ["CodeParrot", "InCoder", "CodeGen"] - - -@st.cache() -def load_examples(): - with open("utils/examples.json", "r") as f: - examples = json.load(f) - return examples - - -def load_evaluation(): - # load task 2 of HumanEval and code_eval_metric - os.environ["HF_ALLOW_CODE_EVAL"] = "1" - human_eval = load_dataset("openai_humaneval") - entry_point = f"check({human_eval['test'][2]['entry_point']})" - test_func = "\n" + human_eval["test"][2]["test"] + "\n" + entry_point - code_eval = load_metric("code_eval") - return code_eval, test_func - - -def read_markdown(path): - with open(path, "r") as f: - output = f.read() - st.markdown(output, unsafe_allow_html=True) - - -def generate_code( - generations, model_name, gen_prompt, max_new_tokens, temperature, seed -): - # call space using its API endpoint - url = ( - f"https://hf.space/embed/codeparrot/{model_name.lower()}-subspace/+/api/predict/" - ) - r = requests.post( - url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]} - ) - generated_text = r.json()["data"][0] - generations.append({model_name: generated_text}) - - -def generate_code_threads( - generations, models, gen_prompt, max_new_tokens, temperature, seed -): - threads = [] - for model_name in models: - # create the thread - threads.append( - threading.Thread( - target=generate_code, - args=( - generations, - model_name, - gen_prompt, - max_new_tokens, - temperature, - seed, - ), - ) - ) - threads[-1].start() - - for t in threads: - t.join() - -@st.cache(show_spinner=False) -def generate_teaser(gen_prompt): - generations = [] - generate_code(generations, "CodeParrot", gen_prompt, 8, 0.2, 42) - return generations[0]["CodeParrot"] - -st.set_page_config(page_icon=":laptop:", layout="wide") -with open("utils/table_contents.md", "r") as f: - contents = f.read() - -st.sidebar.markdown(contents) - -# Introduction -st.title("Code generation with 🤗") -read_markdown("utils/summary.md") -## teaser -example_text = "def print_hello_world():" -col1, col2, col3 = st.columns([1, 2, 1]) -with col2: - gen_prompt = st.text_area( - "", - value=example_text, - height=100, - ).strip() - if st.button("Generate code!", key=1): - with st.spinner("Generating code..."): - st.code(generate_teaser(gen_prompt)) -read_markdown("utils/intro.md") - -# Code datasets -st.subheader("1 - Code datasets") -read_markdown("datasets/intro.md") -read_markdown("datasets/github_code.md") -col1, col2 = st.columns([1, 2]) -with col1: - selected_model = st.selectbox("", MODELS, key=1) -read_markdown(f"datasets/{selected_model.lower()}.md") - - -# Model architecture -st.subheader("2 - Model architecture") -read_markdown("architectures/intro.md") -col1, col2 = st.columns([1, 2]) -with col1: - selected_model = st.selectbox("", MODELS, key=2) -read_markdown(f"architectures/{selected_model.lower()}.md") - -# Model evaluation -st.subheader("3 - Code model evaluation") -read_markdown("evaluation/intro.md") 
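-# The pass@1 check further down reduces to the following sketch (hypothetical candidate string, same calls as below):
-#   code_eval, test_func = load_evaluation()
-#   pass_at_k, _ = code_eval.compute(references=[test_func], predictions=[["def candidate(): ..."]])
-#   # pass_at_k["pass@1"] is 1.0 only when the candidate passes the HumanEval test case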
-read_markdown("evaluation/demo_humaneval.md") -## quiz -st.markdown("Below you can try solving this problem or visualize the solution of CodeParrot:") -with open("evaluation/problem.md", "r") as f: - problem = f.read() -with open("evaluation/solution.md", "r") as f: - solution = f.read() - -candidate_solution = st.text_area( - "Complete the problem:", - value=problem, - height=240, -).strip() -if st.button("Test my solution", key=2): - with st.spinner("Testing..."): - code_eval, test_func = load_evaluation() - test_cases = [test_func] - candidates = [[candidate_solution]] - pass_at_k, _ = code_eval.compute(references=test_cases, predictions=candidates) - text = "Your solution didn't pass the test, pass@1 is 0 😕" if pass_at_k['pass@1'] < 1 else "Congrats your pass@1 is 1! 🎉" - st.markdown(text) -if st.button("Show model solution", key=3): - st.markdown(solution) - -# Code generation -st.subheader("4 - Code generation ✨") -read_markdown("generation/intro.md") -col1, col2, col3 = st.columns([7, 1, 6]) -with col1: - st.markdown("**Models**") - selected_models = st.multiselect( - "Select code generation models to compare:", - GENERATION_MODELS, - default=GENERATION_MODELS, - key=3, - ) - st.markdown(" ") - st.markdown("**Examples**") - examples = load_examples() - example_names = [example["name"] for example in examples] - name2id = dict([(name, i) for i, name in enumerate(example_names)]) - selected_example = st.selectbox( - "Select one of the following examples or implement yours:", example_names - ) - example_text = examples[name2id[selected_example]]["value"] - default_length = examples[name2id[selected_example]]["length"] -with col3: - st.markdown("**Generation settings**") - temperature = st.slider( - "Temperature:", value=0.2, min_value=0.1, step=0.1, max_value=2.0 - ) - max_new_tokens = st.slider( - "Number of tokens to generate:", - value=default_length, - min_value=8, - step=4, - max_value=256, - ) - seed = st.slider("Random seed:", value=42, min_value=0, step=1, max_value=1000) -gen_prompt = st.text_area( - "Generate code with prompt:", - value=example_text, - height=200, -).strip() -if st.button("Generate code!", key=4): - with st.spinner("Generating code..."): - # use threading - generations = [] - generate_code_threads( - generations, - selected_models, - gen_prompt=gen_prompt, - max_new_tokens=max_new_tokens, - temperature=temperature, - seed=seed, - ) - for i in range(len(generations)): - st.markdown(f"**{selected_models[i]}**") - for j in range(len(generations)): - if selected_models[i] in generations[j].keys(): - st.code(generations[j][selected_models[i]]) - if len(generations) < len(selected_models): - st.markdown("Warning: Some models run into timeout, try another time or reduce the Number of tokens to generate. 
You can also try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)", unsafe_allow_html=True) - -# Resources -st.subheader("Resources") -read_markdown("utils/resources.md") diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc.c deleted file mode 100644 index ed036209e928824d3e3d3885db2c84798468d583..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc.c +++ /dev/null @@ -1,1435 +0,0 @@ -/* - * AAC encoder - * Copyright (C) 2008 Konstantin Shishkov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * AAC encoder - */ - -/*********************************** - * TODOs: - * add sane pulse detection - ***********************************/ -#include - -#include "libavutil/channel_layout.h" -#include "libavutil/libm.h" -#include "libavutil/float_dsp.h" -#include "libavutil/opt.h" -#include "avcodec.h" -#include "codec_internal.h" -#include "encode.h" -#include "put_bits.h" -#include "mpeg4audio.h" -#include "sinewin.h" -#include "profiles.h" -#include "version.h" - -#include "aac.h" -#include "aactab.h" -#include "aacenc.h" -#include "aacenctab.h" -#include "aacenc_utils.h" - -#include "psymodel.h" - -/** - * List of PCE (Program Configuration Element) for the channel layouts listed - * in channel_layout.h - * - * For those wishing in the future to add other layouts: - * - * - num_ele: number of elements in each group of front, side, back, lfe channels - * (an element is of type SCE (single channel), CPE (channel pair) for - * the first 3 groups; and is LFE for LFE group). - * - * - pairing: 0 for an SCE element or 1 for a CPE; does not apply to LFE group - * - * - index: there are three independent indices for SCE, CPE and LFE; - * they are incremented irrespective of the group to which the element belongs; - * they are not reset when going from one group to another - * - * Example: for 7.0 channel layout, - * .pairing = { { 1, 0 }, { 1 }, { 1 }, }, (3 CPE and 1 SCE in front group) - * .index = { { 0, 0 }, { 1 }, { 2 }, }, - * (index is 0 for the single SCE but goes from 0 to 2 for the CPEs) - * - * The index order impacts the channel ordering. But is otherwise arbitrary - * (the sequence could have been 2, 0, 1 instead of 0, 1, 2). - * - * Spec allows for discontinuous indices, e.g. if one has a total of two SCE, - * SCE.0 SCE.15 is OK per spec; BUT it won't be decoded by our AAC decoder - * which at this time requires that indices fully cover some range starting - * from 0 (SCE.1 SCE.0 is OK but not SCE.0 SCE.15). 
- * - * - config_map: total number of elements and their types. Beware, the way the - * types are ordered impacts the final channel ordering. - * - * - reorder_map: reorders the channels. - * - */ -static const AACPCEInfo aac_pce_configs[] = { - { - .layout = AV_CHANNEL_LAYOUT_MONO, - .num_ele = { 1, 0, 0, 0 }, - .pairing = { { 0 }, }, - .index = { { 0 }, }, - .config_map = { 1, TYPE_SCE, }, - .reorder_map = { 0 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_STEREO, - .num_ele = { 1, 0, 0, 0 }, - .pairing = { { 1 }, }, - .index = { { 0 }, }, - .config_map = { 1, TYPE_CPE, }, - .reorder_map = { 0, 1 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_2POINT1, - .num_ele = { 1, 0, 0, 1 }, - .pairing = { { 1 }, }, - .index = { { 0 },{ 0 },{ 0 },{ 0 } }, - .config_map = { 2, TYPE_CPE, TYPE_LFE }, - .reorder_map = { 0, 1, 2 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_2_1, - .num_ele = { 1, 0, 1, 0 }, - .pairing = { { 1 },{ 0 },{ 0 } }, - .index = { { 0 },{ 0 },{ 0 }, }, - .config_map = { 2, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_SURROUND, - .num_ele = { 2, 0, 0, 0 }, - .pairing = { { 1, 0 }, }, - .index = { { 0, 0 }, }, - .config_map = { 2, TYPE_CPE, TYPE_SCE, }, - .reorder_map = { 0, 1, 2 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_3POINT1, - .num_ele = { 2, 0, 0, 1 }, - .pairing = { { 1, 0 }, }, - .index = { { 0, 0 }, { 0 }, { 0 }, { 0 }, }, - .config_map = { 3, TYPE_CPE, TYPE_SCE, TYPE_LFE }, - .reorder_map = { 0, 1, 2, 3 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_4POINT0, - .num_ele = { 2, 0, 1, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 0 }, }, - .index = { { 0, 0 }, { 0 }, { 1 } }, - .config_map = { 3, TYPE_CPE, TYPE_SCE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_4POINT1, - .num_ele = { 2, 1, 1, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 0 }, }, - .index = { { 0, 0 }, { 1 }, { 2 }, { 0 } }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_2_2, - .num_ele = { 1, 1, 0, 0 }, - .pairing = { { 1 }, { 1 }, }, - .index = { { 0 }, { 1 }, }, - .config_map = { 2, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_QUAD, - .num_ele = { 1, 0, 1, 0 }, - .pairing = { { 1 }, { 0 }, { 1 }, }, - .index = { { 0 }, { 0 }, { 1 } }, - .config_map = { 2, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_5POINT0, - .num_ele = { 2, 1, 0, 0 }, - .pairing = { { 1, 0 }, { 1 }, }, - .index = { { 0, 0 }, { 1 } }, - .config_map = { 3, TYPE_CPE, TYPE_SCE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_5POINT1, - .num_ele = { 2, 1, 1, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1 }, }, - .index = { { 0, 0 }, { 1 }, { 1 } }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_5POINT0_BACK, - .num_ele = { 2, 0, 1, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1 } }, - .index = { { 0, 0 }, { 0 }, { 1 } }, - .config_map = { 3, TYPE_CPE, TYPE_SCE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_5POINT1_BACK, - .num_ele = { 2, 1, 1, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1 }, }, - .index = { { 0, 0 }, { 1 }, { 1 } }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_6POINT0, - .num_ele = { 2, 1, 1, 0 }, - .pairing = { { 1, 0 }, { 1 }, { 0 }, }, - 
.index = { { 0, 0 }, { 1 }, { 1 } }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_6POINT0_FRONT, - .num_ele = { 2, 1, 0, 0 }, - .pairing = { { 1, 1 }, { 1 } }, - .index = { { 1, 0 }, { 2 }, }, - .config_map = { 3, TYPE_CPE, TYPE_CPE, TYPE_CPE, }, - .reorder_map = { 0, 1, 2, 3, 4, 5 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_HEXAGONAL, - .num_ele = { 2, 0, 2, 0 }, - .pairing = { { 1, 0 },{ 0 },{ 1, 0 }, }, - .index = { { 0, 0 },{ 0 },{ 1, 1 } }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_SCE, }, - .reorder_map = { 0, 1, 2, 3, 4, 5 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_6POINT1, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 },{ 0 },{ 1, 0 }, }, - .index = { { 0, 0 },{ 1 },{ 1, 2 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_6POINT1_BACK, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1, 0 }, }, - .index = { { 0, 0 }, { 1 }, { 1, 2 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_6POINT1_FRONT, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1, 0 }, }, - .index = { { 0, 0 }, { 1 }, { 1, 2 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_7POINT0, - .num_ele = { 2, 1, 1, 0 }, - .pairing = { { 1, 0 }, { 1 }, { 1 }, }, - .index = { { 0, 0 }, { 1 }, { 2 }, }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_7POINT0_FRONT, - .num_ele = { 2, 1, 1, 0 }, - .pairing = { { 1, 0 }, { 1 }, { 1 }, }, - .index = { { 0, 0 }, { 1 }, { 2 }, }, - .config_map = { 4, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_7POINT1, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1, 1 }, }, - .index = { { 0, 0 }, { 1 }, { 1, 2 }, { 0 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_7POINT1_WIDE, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 }, { 0 },{ 1, 1 }, }, - .index = { { 0, 0 }, { 1 }, { 1, 2 }, { 0 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_7POINT1_WIDE_BACK, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 }, { 0 }, { 1, 1 }, }, - .index = { { 0, 0 }, { 1 }, { 1, 2 }, { 0 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_SCE, TYPE_CPE, TYPE_CPE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_OCTAGONAL, - .num_ele = { 2, 1, 2, 0 }, - .pairing = { { 1, 0 }, { 1 }, { 1, 0 }, }, - .index = { { 0, 0 }, { 1 }, { 2, 1 } }, - .config_map = { 5, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7 }, - }, - { /* Meant for order 2/mixed ambisonics */ - .layout = { .order = AV_CHANNEL_ORDER_NATIVE, .nb_channels = 9, - .u.mask = AV_CH_LAYOUT_OCTAGONAL | AV_CH_TOP_CENTER }, - .num_ele = { 2, 2, 2, 0 }, - .pairing = { { 1, 0 }, { 1, 0 }, { 1, 0 }, }, - .index = { { 0, 0 }, { 1, 1 }, { 2, 2 } }, - .config_map = { 6, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_SCE, 
TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7, 8 }, - }, - { /* Meant for order 2/mixed ambisonics */ - .layout = { .order = AV_CHANNEL_ORDER_NATIVE, .nb_channels = 10, - .u.mask = AV_CH_LAYOUT_6POINT0_FRONT | AV_CH_BACK_CENTER | - AV_CH_BACK_LEFT | AV_CH_BACK_RIGHT | AV_CH_TOP_CENTER }, - .num_ele = { 2, 2, 2, 0 }, - .pairing = { { 1, 1 }, { 1, 0 }, { 1, 0 }, }, - .index = { { 0, 1 }, { 2, 0 }, { 3, 1 } }, - .config_map = { 6, TYPE_CPE, TYPE_CPE, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, - }, - { - .layout = AV_CHANNEL_LAYOUT_HEXADECAGONAL, - .num_ele = { 4, 2, 4, 0 }, - .pairing = { { 1, 0, 1, 0 }, { 1, 1 }, { 1, 0, 1, 0 }, }, - .index = { { 0, 0, 1, 1 }, { 2, 3 }, { 4, 2, 5, 3 } }, - .config_map = { 10, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_CPE, TYPE_SCE, TYPE_CPE, TYPE_SCE }, - .reorder_map = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, - }, -}; - -static void put_pce(PutBitContext *pb, AVCodecContext *avctx) -{ - int i, j; - AACEncContext *s = avctx->priv_data; - AACPCEInfo *pce = &s->pce; - const int bitexact = avctx->flags & AV_CODEC_FLAG_BITEXACT; - const char *aux_data = bitexact ? "Lavc" : LIBAVCODEC_IDENT; - - put_bits(pb, 4, 0); - - put_bits(pb, 2, avctx->profile); - put_bits(pb, 4, s->samplerate_index); - - put_bits(pb, 4, pce->num_ele[0]); /* Front */ - put_bits(pb, 4, pce->num_ele[1]); /* Side */ - put_bits(pb, 4, pce->num_ele[2]); /* Back */ - put_bits(pb, 2, pce->num_ele[3]); /* LFE */ - put_bits(pb, 3, 0); /* Assoc data */ - put_bits(pb, 4, 0); /* CCs */ - - put_bits(pb, 1, 0); /* Stereo mixdown */ - put_bits(pb, 1, 0); /* Mono mixdown */ - put_bits(pb, 1, 0); /* Something else */ - - for (i = 0; i < 4; i++) { - for (j = 0; j < pce->num_ele[i]; j++) { - if (i < 3) - put_bits(pb, 1, pce->pairing[i][j]); - put_bits(pb, 4, pce->index[i][j]); - } - } - - align_put_bits(pb); - put_bits(pb, 8, strlen(aux_data)); - ff_put_string(pb, aux_data, 0); -} - -/** - * Make AAC audio config object. - * @see 1.6.2.1 "Syntax - AudioSpecificConfig" - */ -static int put_audio_specific_config(AVCodecContext *avctx) -{ - PutBitContext pb; - AACEncContext *s = avctx->priv_data; - int channels = (!s->needs_pce)*(s->channels - (s->channels == 8 ? 
1 : 0)); - const int max_size = 32; - - avctx->extradata = av_mallocz(max_size); - if (!avctx->extradata) - return AVERROR(ENOMEM); - - init_put_bits(&pb, avctx->extradata, max_size); - put_bits(&pb, 5, s->profile+1); //profile - put_bits(&pb, 4, s->samplerate_index); //sample rate index - put_bits(&pb, 4, channels); - //GASpecificConfig - put_bits(&pb, 1, 0); //frame length - 1024 samples - put_bits(&pb, 1, 0); //does not depend on core coder - put_bits(&pb, 1, 0); //is not extension - if (s->needs_pce) - put_pce(&pb, avctx); - - //Explicitly Mark SBR absent - put_bits(&pb, 11, 0x2b7); //sync extension - put_bits(&pb, 5, AOT_SBR); - put_bits(&pb, 1, 0); - flush_put_bits(&pb); - avctx->extradata_size = put_bytes_output(&pb); - - return 0; -} - -void ff_quantize_band_cost_cache_init(struct AACEncContext *s) -{ - ++s->quantize_band_cost_cache_generation; - if (s->quantize_band_cost_cache_generation == 0) { - memset(s->quantize_band_cost_cache, 0, sizeof(s->quantize_band_cost_cache)); - s->quantize_band_cost_cache_generation = 1; - } -} - -#define WINDOW_FUNC(type) \ -static void apply_ ##type ##_window(AVFloatDSPContext *fdsp, \ - SingleChannelElement *sce, \ - const float *audio) - -WINDOW_FUNC(only_long) -{ - const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; - const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; - float *out = sce->ret_buf; - - fdsp->vector_fmul (out, audio, lwindow, 1024); - fdsp->vector_fmul_reverse(out + 1024, audio + 1024, pwindow, 1024); -} - -WINDOW_FUNC(long_start) -{ - const float *lwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_long_1024 : ff_sine_1024; - const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; - float *out = sce->ret_buf; - - fdsp->vector_fmul(out, audio, lwindow, 1024); - memcpy(out + 1024, audio + 1024, sizeof(out[0]) * 448); - fdsp->vector_fmul_reverse(out + 1024 + 448, audio + 1024 + 448, swindow, 128); - memset(out + 1024 + 576, 0, sizeof(out[0]) * 448); -} - -WINDOW_FUNC(long_stop) -{ - const float *lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024; - const float *swindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; - float *out = sce->ret_buf; - - memset(out, 0, sizeof(out[0]) * 448); - fdsp->vector_fmul(out + 448, audio + 448, swindow, 128); - memcpy(out + 576, audio + 576, sizeof(out[0]) * 448); - fdsp->vector_fmul_reverse(out + 1024, audio + 1024, lwindow, 1024); -} - -WINDOW_FUNC(eight_short) -{ - const float *swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128; - const float *pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128; - const float *in = audio + 448; - float *out = sce->ret_buf; - int w; - - for (w = 0; w < 8; w++) { - fdsp->vector_fmul (out, in, w ? 
pwindow : swindow, 128); - out += 128; - in += 128; - fdsp->vector_fmul_reverse(out, in, swindow, 128); - out += 128; - } -} - -static void (*const apply_window[4])(AVFloatDSPContext *fdsp, - SingleChannelElement *sce, - const float *audio) = { - [ONLY_LONG_SEQUENCE] = apply_only_long_window, - [LONG_START_SEQUENCE] = apply_long_start_window, - [EIGHT_SHORT_SEQUENCE] = apply_eight_short_window, - [LONG_STOP_SEQUENCE] = apply_long_stop_window -}; - -static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce, - float *audio) -{ - int i; - float *output = sce->ret_buf; - - apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, audio); - - if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) - s->mdct1024_fn(s->mdct1024, sce->coeffs, output, sizeof(float)); - else - for (i = 0; i < 1024; i += 128) - s->mdct128_fn(s->mdct128, &sce->coeffs[i], output + i*2, sizeof(float)); - memcpy(audio, audio + 1024, sizeof(audio[0]) * 1024); - memcpy(sce->pcoeffs, sce->coeffs, sizeof(sce->pcoeffs)); -} - -/** - * Encode ics_info element. - * @see Table 4.6 (syntax of ics_info) - */ -static void put_ics_info(AACEncContext *s, IndividualChannelStream *info) -{ - int w; - - put_bits(&s->pb, 1, 0); // ics_reserved bit - put_bits(&s->pb, 2, info->window_sequence[0]); - put_bits(&s->pb, 1, info->use_kb_window[0]); - if (info->window_sequence[0] != EIGHT_SHORT_SEQUENCE) { - put_bits(&s->pb, 6, info->max_sfb); - put_bits(&s->pb, 1, !!info->predictor_present); - } else { - put_bits(&s->pb, 4, info->max_sfb); - for (w = 1; w < 8; w++) - put_bits(&s->pb, 1, !info->group_len[w]); - } -} - -/** - * Encode MS data. - * @see 4.6.8.1 "Joint Coding - M/S Stereo" - */ -static void encode_ms_info(PutBitContext *pb, ChannelElement *cpe) -{ - int i, w; - - put_bits(pb, 2, cpe->ms_mode); - if (cpe->ms_mode == 1) - for (w = 0; w < cpe->ch[0].ics.num_windows; w += cpe->ch[0].ics.group_len[w]) - for (i = 0; i < cpe->ch[0].ics.max_sfb; i++) - put_bits(pb, 1, cpe->ms_mask[w*16 + i]); -} - -/** - * Produce integer coefficients from scalefactors provided by the model. - */ -static void adjust_frame_information(ChannelElement *cpe, int chans) -{ - int i, w, w2, g, ch; - int maxsfb, cmaxsfb; - - for (ch = 0; ch < chans; ch++) { - IndividualChannelStream *ics = &cpe->ch[ch].ics; - maxsfb = 0; - cpe->ch[ch].pulse.num_pulse = 0; - for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { - for (w2 = 0; w2 < ics->group_len[w]; w2++) { - for (cmaxsfb = ics->num_swb; cmaxsfb > 0 && cpe->ch[ch].zeroes[w*16+cmaxsfb-1]; cmaxsfb--) - ; - maxsfb = FFMAX(maxsfb, cmaxsfb); - } - } - ics->max_sfb = maxsfb; - - //adjust zero bands for window groups - for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { - for (g = 0; g < ics->max_sfb; g++) { - i = 1; - for (w2 = w; w2 < w + ics->group_len[w]; w2++) { - if (!cpe->ch[ch].zeroes[w2*16 + g]) { - i = 0; - break; - } - } - cpe->ch[ch].zeroes[w*16 + g] = i; - } - } - } - - if (chans > 1 && cpe->common_window) { - IndividualChannelStream *ics0 = &cpe->ch[0].ics; - IndividualChannelStream *ics1 = &cpe->ch[1].ics; - int msc = 0; - ics0->max_sfb = FFMAX(ics0->max_sfb, ics1->max_sfb); - ics1->max_sfb = ics0->max_sfb; - for (w = 0; w < ics0->num_windows*16; w += 16) - for (i = 0; i < ics0->max_sfb; i++) - if (cpe->ms_mask[w+i]) - msc++; - if (msc == 0 || ics0->max_sfb == 0) - cpe->ms_mode = 0; - else - cpe->ms_mode = msc < ics0->max_sfb * ics0->num_windows ? 
1 : 2; - } -} - -static void apply_intensity_stereo(ChannelElement *cpe) -{ - int w, w2, g, i; - IndividualChannelStream *ics = &cpe->ch[0].ics; - if (!cpe->common_window) - return; - for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { - for (w2 = 0; w2 < ics->group_len[w]; w2++) { - int start = (w+w2) * 128; - for (g = 0; g < ics->num_swb; g++) { - int p = -1 + 2 * (cpe->ch[1].band_type[w*16+g] - 14); - float scale = cpe->ch[0].is_ener[w*16+g]; - if (!cpe->is_mask[w*16 + g]) { - start += ics->swb_sizes[g]; - continue; - } - if (cpe->ms_mask[w*16 + g]) - p *= -1; - for (i = 0; i < ics->swb_sizes[g]; i++) { - float sum = (cpe->ch[0].coeffs[start+i] + p*cpe->ch[1].coeffs[start+i])*scale; - cpe->ch[0].coeffs[start+i] = sum; - cpe->ch[1].coeffs[start+i] = 0.0f; - } - start += ics->swb_sizes[g]; - } - } - } -} - -static void apply_mid_side_stereo(ChannelElement *cpe) -{ - int w, w2, g, i; - IndividualChannelStream *ics = &cpe->ch[0].ics; - if (!cpe->common_window) - return; - for (w = 0; w < ics->num_windows; w += ics->group_len[w]) { - for (w2 = 0; w2 < ics->group_len[w]; w2++) { - int start = (w+w2) * 128; - for (g = 0; g < ics->num_swb; g++) { - /* ms_mask can be used for other purposes in PNS and I/S, - * so must not apply M/S if any band uses either, even if - * ms_mask is set. - */ - if (!cpe->ms_mask[w*16 + g] || cpe->is_mask[w*16 + g] - || cpe->ch[0].band_type[w*16 + g] >= NOISE_BT - || cpe->ch[1].band_type[w*16 + g] >= NOISE_BT) { - start += ics->swb_sizes[g]; - continue; - } - for (i = 0; i < ics->swb_sizes[g]; i++) { - float L = (cpe->ch[0].coeffs[start+i] + cpe->ch[1].coeffs[start+i]) * 0.5f; - float R = L - cpe->ch[1].coeffs[start+i]; - cpe->ch[0].coeffs[start+i] = L; - cpe->ch[1].coeffs[start+i] = R; - } - start += ics->swb_sizes[g]; - } - } - } -} - -/** - * Encode scalefactor band coding type. - */ -static void encode_band_info(AACEncContext *s, SingleChannelElement *sce) -{ - int w; - - if (s->coder->set_special_band_scalefactors) - s->coder->set_special_band_scalefactors(s, sce); - - for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) - s->coder->encode_window_bands_info(s, sce, w, sce->ics.group_len[w], s->lambda); -} - -/** - * Encode scalefactors. - */ -static void encode_scale_factors(AVCodecContext *avctx, AACEncContext *s, - SingleChannelElement *sce) -{ - int diff, off_sf = sce->sf_idx[0], off_pns = sce->sf_idx[0] - NOISE_OFFSET; - int off_is = 0, noise_flag = 1; - int i, w; - - for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { - for (i = 0; i < sce->ics.max_sfb; i++) { - if (!sce->zeroes[w*16 + i]) { - if (sce->band_type[w*16 + i] == NOISE_BT) { - diff = sce->sf_idx[w*16 + i] - off_pns; - off_pns = sce->sf_idx[w*16 + i]; - if (noise_flag-- > 0) { - put_bits(&s->pb, NOISE_PRE_BITS, diff + NOISE_PRE); - continue; - } - } else if (sce->band_type[w*16 + i] == INTENSITY_BT || - sce->band_type[w*16 + i] == INTENSITY_BT2) { - diff = sce->sf_idx[w*16 + i] - off_is; - off_is = sce->sf_idx[w*16 + i]; - } else { - diff = sce->sf_idx[w*16 + i] - off_sf; - off_sf = sce->sf_idx[w*16 + i]; - } - diff += SCALE_DIFF_ZERO; - av_assert0(diff >= 0 && diff <= 120); - put_bits(&s->pb, ff_aac_scalefactor_bits[diff], ff_aac_scalefactor_code[diff]); - } - } - } -} - -/** - * Encode pulse data. 
- */ -static void encode_pulses(AACEncContext *s, Pulse *pulse) -{ - int i; - - put_bits(&s->pb, 1, !!pulse->num_pulse); - if (!pulse->num_pulse) - return; - - put_bits(&s->pb, 2, pulse->num_pulse - 1); - put_bits(&s->pb, 6, pulse->start); - for (i = 0; i < pulse->num_pulse; i++) { - put_bits(&s->pb, 5, pulse->pos[i]); - put_bits(&s->pb, 4, pulse->amp[i]); - } -} - -/** - * Encode spectral coefficients processed by psychoacoustic model. - */ -static void encode_spectral_coeffs(AACEncContext *s, SingleChannelElement *sce) -{ - int start, i, w, w2; - - for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) { - start = 0; - for (i = 0; i < sce->ics.max_sfb; i++) { - if (sce->zeroes[w*16 + i]) { - start += sce->ics.swb_sizes[i]; - continue; - } - for (w2 = w; w2 < w + sce->ics.group_len[w]; w2++) { - s->coder->quantize_and_encode_band(s, &s->pb, - &sce->coeffs[start + w2*128], - NULL, sce->ics.swb_sizes[i], - sce->sf_idx[w*16 + i], - sce->band_type[w*16 + i], - s->lambda, - sce->ics.window_clipping[w]); - } - start += sce->ics.swb_sizes[i]; - } - } -} - -/** - * Downscale spectral coefficients for near-clipping windows to avoid artifacts - */ -static void avoid_clipping(AACEncContext *s, SingleChannelElement *sce) -{ - int start, i, j, w; - - if (sce->ics.clip_avoidance_factor < 1.0f) { - for (w = 0; w < sce->ics.num_windows; w++) { - start = 0; - for (i = 0; i < sce->ics.max_sfb; i++) { - float *swb_coeffs = &sce->coeffs[start + w*128]; - for (j = 0; j < sce->ics.swb_sizes[i]; j++) - swb_coeffs[j] *= sce->ics.clip_avoidance_factor; - start += sce->ics.swb_sizes[i]; - } - } - } -} - -/** - * Encode one channel of audio data. - */ -static int encode_individual_channel(AVCodecContext *avctx, AACEncContext *s, - SingleChannelElement *sce, - int common_window) -{ - put_bits(&s->pb, 8, sce->sf_idx[0]); - if (!common_window) { - put_ics_info(s, &sce->ics); - if (s->coder->encode_main_pred) - s->coder->encode_main_pred(s, sce); - if (s->coder->encode_ltp_info) - s->coder->encode_ltp_info(s, sce, 0); - } - encode_band_info(s, sce); - encode_scale_factors(avctx, s, sce); - encode_pulses(s, &sce->pulse); - put_bits(&s->pb, 1, !!sce->tns.present); - if (s->coder->encode_tns_info) - s->coder->encode_tns_info(s, sce); - put_bits(&s->pb, 1, 0); //ssr - encode_spectral_coeffs(s, sce); - return 0; -} - -/** - * Write some auxiliary information about the created AAC file. - */ -static void put_bitstream_info(AACEncContext *s, const char *name) -{ - int i, namelen, padbits; - - namelen = strlen(name) + 2; - put_bits(&s->pb, 3, TYPE_FIL); - put_bits(&s->pb, 4, FFMIN(namelen, 15)); - if (namelen >= 15) - put_bits(&s->pb, 8, namelen - 14); - put_bits(&s->pb, 4, 0); //extension type - filler - padbits = -put_bits_count(&s->pb) & 7; - align_put_bits(&s->pb); - for (i = 0; i < namelen - 2; i++) - put_bits(&s->pb, 8, name[i]); - put_bits(&s->pb, 12 - padbits, 0); -} - -/* - * Copy input samples. - * Channels are reordered from libavcodec's default order to AAC order. - */ -static void copy_input_samples(AACEncContext *s, const AVFrame *frame) -{ - int ch; - int end = 2048 + (frame ? 
frame->nb_samples : 0); - const uint8_t *channel_map = s->reorder_map; - - /* copy and remap input samples */ - for (ch = 0; ch < s->channels; ch++) { - /* copy last 1024 samples of previous frame to the start of the current frame */ - memcpy(&s->planar_samples[ch][1024], &s->planar_samples[ch][2048], 1024 * sizeof(s->planar_samples[0][0])); - - /* copy new samples and zero any remaining samples */ - if (frame) { - memcpy(&s->planar_samples[ch][2048], - frame->extended_data[channel_map[ch]], - frame->nb_samples * sizeof(s->planar_samples[0][0])); - } - memset(&s->planar_samples[ch][end], 0, - (3072 - end) * sizeof(s->planar_samples[0][0])); - } -} - -static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, - const AVFrame *frame, int *got_packet_ptr) -{ - AACEncContext *s = avctx->priv_data; - float **samples = s->planar_samples, *samples2, *la, *overlap; - ChannelElement *cpe; - SingleChannelElement *sce; - IndividualChannelStream *ics; - int i, its, ch, w, chans, tag, start_ch, ret, frame_bits; - int target_bits, rate_bits, too_many_bits, too_few_bits; - int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0; - int chan_el_counter[4]; - FFPsyWindowInfo windows[AAC_MAX_CHANNELS]; - - /* add current frame to queue */ - if (frame) { - if ((ret = ff_af_queue_add(&s->afq, frame)) < 0) - return ret; - } else { - if (!s->afq.remaining_samples || (!s->afq.frame_alloc && !s->afq.frame_count)) - return 0; - } - - copy_input_samples(s, frame); - if (s->psypp) - ff_psy_preprocess(s->psypp, s->planar_samples, s->channels); - - if (!avctx->frame_num) - return 0; - - start_ch = 0; - for (i = 0; i < s->chan_map[0]; i++) { - FFPsyWindowInfo* wi = windows + start_ch; - tag = s->chan_map[i+1]; - chans = tag == TYPE_CPE ? 2 : 1; - cpe = &s->cpe[i]; - for (ch = 0; ch < chans; ch++) { - int k; - float clip_avoidance_factor; - sce = &cpe->ch[ch]; - ics = &sce->ics; - s->cur_channel = start_ch + ch; - overlap = &samples[s->cur_channel][0]; - samples2 = overlap + 1024; - la = samples2 + (448+64); - if (!frame) - la = NULL; - if (tag == TYPE_LFE) { - wi[ch].window_type[0] = wi[ch].window_type[1] = ONLY_LONG_SEQUENCE; - wi[ch].window_shape = 0; - wi[ch].num_windows = 1; - wi[ch].grouping[0] = 1; - wi[ch].clipping[0] = 0; - - /* Only the lowest 12 coefficients are used in a LFE channel. - * The expression below results in only the bottom 8 coefficients - * being used for 11.025kHz to 16kHz sample rates. - */ - ics->num_swb = s->samplerate_index >= 8 ? 1 : 3; - } else { - wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel, - ics->window_sequence[0]); - } - ics->window_sequence[1] = ics->window_sequence[0]; - ics->window_sequence[0] = wi[ch].window_type[0]; - ics->use_kb_window[1] = ics->use_kb_window[0]; - ics->use_kb_window[0] = wi[ch].window_shape; - ics->num_windows = wi[ch].num_windows; - ics->swb_sizes = s->psy.bands [ics->num_windows == 8]; - ics->num_swb = tag == TYPE_LFE ? ics->num_swb : s->psy.num_bands[ics->num_windows == 8]; - ics->max_sfb = FFMIN(ics->max_sfb, ics->num_swb); - ics->swb_offset = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ? - ff_swb_offset_128 [s->samplerate_index]: - ff_swb_offset_1024[s->samplerate_index]; - ics->tns_max_bands = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ? 
- ff_tns_max_bands_128 [s->samplerate_index]: - ff_tns_max_bands_1024[s->samplerate_index]; - - for (w = 0; w < ics->num_windows; w++) - ics->group_len[w] = wi[ch].grouping[w]; - - /* Calculate input sample maximums and evaluate clipping risk */ - clip_avoidance_factor = 0.0f; - for (w = 0; w < ics->num_windows; w++) { - const float *wbuf = overlap + w * 128; - const int wlen = 2048 / ics->num_windows; - float max = 0; - int j; - /* mdct input is 2 * output */ - for (j = 0; j < wlen; j++) - max = FFMAX(max, fabsf(wbuf[j])); - wi[ch].clipping[w] = max; - } - for (w = 0; w < ics->num_windows; w++) { - if (wi[ch].clipping[w] > CLIP_AVOIDANCE_FACTOR) { - ics->window_clipping[w] = 1; - clip_avoidance_factor = FFMAX(clip_avoidance_factor, wi[ch].clipping[w]); - } else { - ics->window_clipping[w] = 0; - } - } - if (clip_avoidance_factor > CLIP_AVOIDANCE_FACTOR) { - ics->clip_avoidance_factor = CLIP_AVOIDANCE_FACTOR / clip_avoidance_factor; - } else { - ics->clip_avoidance_factor = 1.0f; - } - - apply_window_and_mdct(s, sce, overlap); - - if (s->options.ltp && s->coder->update_ltp) { - s->coder->update_ltp(s, sce); - apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, &sce->ltp_state[0]); - s->mdct1024_fn(s->mdct1024, sce->lcoeffs, sce->ret_buf, sizeof(float)); - } - - for (k = 0; k < 1024; k++) { - if (!(fabs(cpe->ch[ch].coeffs[k]) < 1E16)) { // Ensure headroom for energy calculation - av_log(avctx, AV_LOG_ERROR, "Input contains (near) NaN/+-Inf\n"); - return AVERROR(EINVAL); - } - } - avoid_clipping(s, sce); - } - start_ch += chans; - } - if ((ret = ff_alloc_packet(avctx, avpkt, 8192 * s->channels)) < 0) - return ret; - frame_bits = its = 0; - do { - init_put_bits(&s->pb, avpkt->data, avpkt->size); - - if ((avctx->frame_num & 0xFF)==1 && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) - put_bitstream_info(s, LIBAVCODEC_IDENT); - start_ch = 0; - target_bits = 0; - memset(chan_el_counter, 0, sizeof(chan_el_counter)); - for (i = 0; i < s->chan_map[0]; i++) { - FFPsyWindowInfo* wi = windows + start_ch; - const float *coeffs[2]; - tag = s->chan_map[i+1]; - chans = tag == TYPE_CPE ? 2 : 1; - cpe = &s->cpe[i]; - cpe->common_window = 0; - memset(cpe->is_mask, 0, sizeof(cpe->is_mask)); - memset(cpe->ms_mask, 0, sizeof(cpe->ms_mask)); - put_bits(&s->pb, 3, tag); - put_bits(&s->pb, 4, chan_el_counter[tag]++); - for (ch = 0; ch < chans; ch++) { - sce = &cpe->ch[ch]; - coeffs[ch] = sce->coeffs; - sce->ics.predictor_present = 0; - sce->ics.ltp.present = 0; - memset(sce->ics.ltp.used, 0, sizeof(sce->ics.ltp.used)); - memset(sce->ics.prediction_used, 0, sizeof(sce->ics.prediction_used)); - memset(&sce->tns, 0, sizeof(TemporalNoiseShaping)); - for (w = 0; w < 128; w++) - if (sce->band_type[w] > RESERVED_BT) - sce->band_type[w] = 0; - } - s->psy.bitres.alloc = -1; - s->psy.bitres.bits = s->last_frame_pb_count / s->channels; - s->psy.model->analyze(&s->psy, start_ch, coeffs, wi); - if (s->psy.bitres.alloc > 0) { - /* Lambda unused here on purpose, we need to take psy's unscaled allocation */ - target_bits += s->psy.bitres.alloc - * (s->lambda / (avctx->global_quality ? 
avctx->global_quality : 120)); - s->psy.bitres.alloc /= chans; - } - s->cur_type = tag; - for (ch = 0; ch < chans; ch++) { - s->cur_channel = start_ch + ch; - if (s->options.pns && s->coder->mark_pns) - s->coder->mark_pns(s, avctx, &cpe->ch[ch]); - s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda); - } - if (chans > 1 - && wi[0].window_type[0] == wi[1].window_type[0] - && wi[0].window_shape == wi[1].window_shape) { - - cpe->common_window = 1; - for (w = 0; w < wi[0].num_windows; w++) { - if (wi[0].grouping[w] != wi[1].grouping[w]) { - cpe->common_window = 0; - break; - } - } - } - for (ch = 0; ch < chans; ch++) { /* TNS and PNS */ - sce = &cpe->ch[ch]; - s->cur_channel = start_ch + ch; - if (s->options.tns && s->coder->search_for_tns) - s->coder->search_for_tns(s, sce); - if (s->options.tns && s->coder->apply_tns_filt) - s->coder->apply_tns_filt(s, sce); - if (sce->tns.present) - tns_mode = 1; - if (s->options.pns && s->coder->search_for_pns) - s->coder->search_for_pns(s, avctx, sce); - } - s->cur_channel = start_ch; - if (s->options.intensity_stereo) { /* Intensity Stereo */ - if (s->coder->search_for_is) - s->coder->search_for_is(s, avctx, cpe); - if (cpe->is_mode) is_mode = 1; - apply_intensity_stereo(cpe); - } - if (s->options.pred) { /* Prediction */ - for (ch = 0; ch < chans; ch++) { - sce = &cpe->ch[ch]; - s->cur_channel = start_ch + ch; - if (s->options.pred && s->coder->search_for_pred) - s->coder->search_for_pred(s, sce); - if (cpe->ch[ch].ics.predictor_present) pred_mode = 1; - } - if (s->coder->adjust_common_pred) - s->coder->adjust_common_pred(s, cpe); - for (ch = 0; ch < chans; ch++) { - sce = &cpe->ch[ch]; - s->cur_channel = start_ch + ch; - if (s->options.pred && s->coder->apply_main_pred) - s->coder->apply_main_pred(s, sce); - } - s->cur_channel = start_ch; - } - if (s->options.mid_side) { /* Mid/Side stereo */ - if (s->options.mid_side == -1 && s->coder->search_for_ms) - s->coder->search_for_ms(s, cpe); - else if (cpe->common_window) - memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask)); - apply_mid_side_stereo(cpe); - } - adjust_frame_information(cpe, chans); - if (s->options.ltp) { /* LTP */ - for (ch = 0; ch < chans; ch++) { - sce = &cpe->ch[ch]; - s->cur_channel = start_ch + ch; - if (s->coder->search_for_ltp) - s->coder->search_for_ltp(s, sce, cpe->common_window); - if (sce->ics.ltp.present) pred_mode = 1; - } - s->cur_channel = start_ch; - if (s->coder->adjust_common_ltp) - s->coder->adjust_common_ltp(s, cpe); - } - if (chans == 2) { - put_bits(&s->pb, 1, cpe->common_window); - if (cpe->common_window) { - put_ics_info(s, &cpe->ch[0].ics); - if (s->coder->encode_main_pred) - s->coder->encode_main_pred(s, &cpe->ch[0]); - if (s->coder->encode_ltp_info) - s->coder->encode_ltp_info(s, &cpe->ch[0], 1); - encode_ms_info(&s->pb, cpe); - if (cpe->ms_mode) ms_mode = 1; - } - } - for (ch = 0; ch < chans; ch++) { - s->cur_channel = start_ch + ch; - encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window); - } - start_ch += chans; - } - - if (avctx->flags & AV_CODEC_FLAG_QSCALE) { - /* When using a constant Q-scale, don't mess with lambda */ - break; - } - - /* rate control stuff - * allow between the nominal bitrate, and what psy's bit reservoir says to target - * but drift towards the nominal bitrate always - */ - frame_bits = put_bits_count(&s->pb); - rate_bits = avctx->bit_rate * 1024 / avctx->sample_rate; - rate_bits = FFMIN(rate_bits, 6144 * s->channels - 3); - too_many_bits = FFMAX(target_bits, rate_bits); - too_many_bits = FFMIN(too_many_bits, 
6144 * s->channels - 3); - too_few_bits = FFMIN(FFMAX(rate_bits - rate_bits/4, target_bits), too_many_bits); - - /* When using ABR, be strict (but only for increasing) */ - too_few_bits = too_few_bits - too_few_bits/8; - too_many_bits = too_many_bits + too_many_bits/2; - - if ( its == 0 /* for steady-state Q-scale tracking */ - || (its < 5 && (frame_bits < too_few_bits || frame_bits > too_many_bits)) - || frame_bits >= 6144 * s->channels - 3 ) - { - float ratio = ((float)rate_bits) / frame_bits; - - if (frame_bits >= too_few_bits && frame_bits <= too_many_bits) { - /* - * This path is for steady-state Q-scale tracking - * When frame bits fall within the stable range, we still need to adjust - * lambda to maintain it like so in a stable fashion (large jumps in lambda - * create artifacts and should be avoided), but slowly - */ - ratio = sqrtf(sqrtf(ratio)); - ratio = av_clipf(ratio, 0.9f, 1.1f); - } else { - /* Not so fast though */ - ratio = sqrtf(ratio); - } - s->lambda = av_clipf(s->lambda * ratio, FLT_EPSILON, 65536.f); - - /* Keep iterating if we must reduce and lambda is in the sky */ - if (ratio > 0.9f && ratio < 1.1f) { - break; - } else { - if (is_mode || ms_mode || tns_mode || pred_mode) { - for (i = 0; i < s->chan_map[0]; i++) { - // Must restore coeffs - chans = tag == TYPE_CPE ? 2 : 1; - cpe = &s->cpe[i]; - for (ch = 0; ch < chans; ch++) - memcpy(cpe->ch[ch].coeffs, cpe->ch[ch].pcoeffs, sizeof(cpe->ch[ch].coeffs)); - } - } - its++; - } - } else { - break; - } - } while (1); - - if (s->options.ltp && s->coder->ltp_insert_new_frame) - s->coder->ltp_insert_new_frame(s); - - put_bits(&s->pb, 3, TYPE_END); - flush_put_bits(&s->pb); - - s->last_frame_pb_count = put_bits_count(&s->pb); - avpkt->size = put_bytes_output(&s->pb); - - s->lambda_sum += s->lambda; - s->lambda_count++; - - ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts, - &avpkt->duration); - - *got_packet_ptr = 1; - return 0; -} - -static av_cold int aac_encode_end(AVCodecContext *avctx) -{ - AACEncContext *s = avctx->priv_data; - - av_log(avctx, AV_LOG_INFO, "Qavg: %.3f\n", s->lambda_count ? 
s->lambda_sum / s->lambda_count : NAN); - - av_tx_uninit(&s->mdct1024); - av_tx_uninit(&s->mdct128); - ff_psy_end(&s->psy); - ff_lpc_end(&s->lpc); - if (s->psypp) - ff_psy_preprocess_end(s->psypp); - av_freep(&s->buffer.samples); - av_freep(&s->cpe); - av_freep(&s->fdsp); - ff_af_queue_close(&s->afq); - return 0; -} - -static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s) -{ - int ret = 0; - float scale = 32768.0f; - - s->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT); - if (!s->fdsp) - return AVERROR(ENOMEM); - - // window init - ff_aac_float_common_init(); - - if ((ret = av_tx_init(&s->mdct1024, &s->mdct1024_fn, AV_TX_FLOAT_MDCT, 0, - 1024, &scale, 0)) < 0) - return ret; - if ((ret = av_tx_init(&s->mdct128, &s->mdct128_fn, AV_TX_FLOAT_MDCT, 0, - 128, &scale, 0)) < 0) - return ret; - - return 0; -} - -static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s) -{ - int ch; - if (!FF_ALLOCZ_TYPED_ARRAY(s->buffer.samples, s->channels * 3 * 1024) || - !FF_ALLOCZ_TYPED_ARRAY(s->cpe, s->chan_map[0])) - return AVERROR(ENOMEM); - - for(ch = 0; ch < s->channels; ch++) - s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch; - - return 0; -} - -static av_cold int aac_encode_init(AVCodecContext *avctx) -{ - AACEncContext *s = avctx->priv_data; - int i, ret = 0; - const uint8_t *sizes[2]; - uint8_t grouping[AAC_MAX_CHANNELS]; - int lengths[2]; - - /* Constants */ - s->last_frame_pb_count = 0; - avctx->frame_size = 1024; - avctx->initial_padding = 1024; - s->lambda = avctx->global_quality > 0 ? avctx->global_quality : 120; - - /* Channel map and unspecified bitrate guessing */ - s->channels = avctx->ch_layout.nb_channels; - - s->needs_pce = 1; - for (i = 0; i < FF_ARRAY_ELEMS(aac_normal_chan_layouts); i++) { - if (!av_channel_layout_compare(&avctx->ch_layout, &aac_normal_chan_layouts[i])) { - s->needs_pce = s->options.pce; - break; - } - } - - if (s->needs_pce) { - char buf[64]; - for (i = 0; i < FF_ARRAY_ELEMS(aac_pce_configs); i++) - if (!av_channel_layout_compare(&avctx->ch_layout, &aac_pce_configs[i].layout)) - break; - av_channel_layout_describe(&avctx->ch_layout, buf, sizeof(buf)); - if (i == FF_ARRAY_ELEMS(aac_pce_configs)) { - av_log(avctx, AV_LOG_ERROR, "Unsupported channel layout \"%s\"\n", buf); - return AVERROR(EINVAL); - } - av_log(avctx, AV_LOG_INFO, "Using a PCE to encode channel layout \"%s\"\n", buf); - s->pce = aac_pce_configs[i]; - s->reorder_map = s->pce.reorder_map; - s->chan_map = s->pce.config_map; - } else { - s->reorder_map = aac_chan_maps[s->channels - 1]; - s->chan_map = aac_chan_configs[s->channels - 1]; - } - - if (!avctx->bit_rate) { - for (i = 1; i <= s->chan_map[0]; i++) { - avctx->bit_rate += s->chan_map[i] == TYPE_CPE ? 128000 : /* Pair */ - s->chan_map[i] == TYPE_LFE ? 
16000 : /* LFE */ - 69000 ; /* SCE */ - } - } - - /* Samplerate */ - for (i = 0; i < 16; i++) - if (avctx->sample_rate == ff_mpeg4audio_sample_rates[i]) - break; - s->samplerate_index = i; - ERROR_IF(s->samplerate_index == 16 || - s->samplerate_index >= ff_aac_swb_size_1024_len || - s->samplerate_index >= ff_aac_swb_size_128_len, - "Unsupported sample rate %d\n", avctx->sample_rate); - - /* Bitrate limiting */ - WARN_IF(1024.0 * avctx->bit_rate / avctx->sample_rate > 6144 * s->channels, - "Too many bits %f > %d per frame requested, clamping to max\n", - 1024.0 * avctx->bit_rate / avctx->sample_rate, - 6144 * s->channels); - avctx->bit_rate = (int64_t)FFMIN(6144 * s->channels / 1024.0 * avctx->sample_rate, - avctx->bit_rate); - - /* Profile and option setting */ - avctx->profile = avctx->profile == FF_PROFILE_UNKNOWN ? FF_PROFILE_AAC_LOW : - avctx->profile; - for (i = 0; i < FF_ARRAY_ELEMS(aacenc_profiles); i++) - if (avctx->profile == aacenc_profiles[i]) - break; - if (avctx->profile == FF_PROFILE_MPEG2_AAC_LOW) { - avctx->profile = FF_PROFILE_AAC_LOW; - ERROR_IF(s->options.pred, - "Main prediction unavailable in the \"mpeg2_aac_low\" profile\n"); - ERROR_IF(s->options.ltp, - "LTP prediction unavailable in the \"mpeg2_aac_low\" profile\n"); - WARN_IF(s->options.pns, - "PNS unavailable in the \"mpeg2_aac_low\" profile, turning off\n"); - s->options.pns = 0; - } else if (avctx->profile == FF_PROFILE_AAC_LTP) { - s->options.ltp = 1; - ERROR_IF(s->options.pred, - "Main prediction unavailable in the \"aac_ltp\" profile\n"); - } else if (avctx->profile == FF_PROFILE_AAC_MAIN) { - s->options.pred = 1; - ERROR_IF(s->options.ltp, - "LTP prediction unavailable in the \"aac_main\" profile\n"); - } else if (s->options.ltp) { - avctx->profile = FF_PROFILE_AAC_LTP; - WARN_IF(1, - "Chainging profile to \"aac_ltp\"\n"); - ERROR_IF(s->options.pred, - "Main prediction unavailable in the \"aac_ltp\" profile\n"); - } else if (s->options.pred) { - avctx->profile = FF_PROFILE_AAC_MAIN; - WARN_IF(1, - "Chainging profile to \"aac_main\"\n"); - ERROR_IF(s->options.ltp, - "LTP prediction unavailable in the \"aac_main\" profile\n"); - } - s->profile = avctx->profile; - - /* Coder limitations */ - s->coder = &ff_aac_coders[s->options.coder]; - if (s->options.coder == AAC_CODER_ANMR) { - ERROR_IF(avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL, - "The ANMR coder is considered experimental, add -strict -2 to enable!\n"); - s->options.intensity_stereo = 0; - s->options.pns = 0; - } - ERROR_IF(s->options.ltp && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL, - "The LPT profile requires experimental compliance, add -strict -2 to enable!\n"); - - /* M/S introduces horrible artifacts with multichannel files, this is temporary */ - if (s->channels > 3) - s->options.mid_side = 0; - - if ((ret = dsp_init(avctx, s)) < 0) - return ret; - - if ((ret = alloc_buffers(avctx, s)) < 0) - return ret; - - if ((ret = put_audio_specific_config(avctx))) - return ret; - - sizes[0] = ff_aac_swb_size_1024[s->samplerate_index]; - sizes[1] = ff_aac_swb_size_128[s->samplerate_index]; - lengths[0] = ff_aac_num_swb_1024[s->samplerate_index]; - lengths[1] = ff_aac_num_swb_128[s->samplerate_index]; - for (i = 0; i < s->chan_map[0]; i++) - grouping[i] = s->chan_map[i + 1] == TYPE_CPE; - if ((ret = ff_psy_init(&s->psy, avctx, 2, sizes, lengths, - s->chan_map[0], grouping)) < 0) - return ret; - s->psypp = ff_psy_preprocess_init(avctx); - ff_lpc_init(&s->lpc, 2*avctx->frame_size, TNS_MAX_ORDER, FF_LPC_TYPE_LEVINSON); - 
s->random_state = 0x1f2e3d4c; - - s->abs_pow34 = abs_pow34_v; - s->quant_bands = quantize_bands; - -#if ARCH_X86 - ff_aac_dsp_init_x86(s); -#endif - -#if HAVE_MIPSDSP - ff_aac_coder_init_mips(s); -#endif - - ff_af_queue_init(avctx, &s->afq); - ff_aac_tableinit(); - - return 0; -} - -#define AACENC_FLAGS AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_AUDIO_PARAM -static const AVOption aacenc_options[] = { - {"aac_coder", "Coding algorithm", offsetof(AACEncContext, options.coder), AV_OPT_TYPE_INT, {.i64 = AAC_CODER_TWOLOOP}, 0, AAC_CODER_NB-1, AACENC_FLAGS, "coder"}, - {"anmr", "ANMR method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_ANMR}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"}, - {"twoloop", "Two loop searching method", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_TWOLOOP}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"}, - {"fast", "Default fast search", 0, AV_OPT_TYPE_CONST, {.i64 = AAC_CODER_FAST}, INT_MIN, INT_MAX, AACENC_FLAGS, "coder"}, - {"aac_ms", "Force M/S stereo coding", offsetof(AACEncContext, options.mid_side), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AACENC_FLAGS}, - {"aac_is", "Intensity stereo coding", offsetof(AACEncContext, options.intensity_stereo), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS}, - {"aac_pns", "Perceptual noise substitution", offsetof(AACEncContext, options.pns), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS}, - {"aac_tns", "Temporal noise shaping", offsetof(AACEncContext, options.tns), AV_OPT_TYPE_BOOL, {.i64 = 1}, -1, 1, AACENC_FLAGS}, - {"aac_ltp", "Long term prediction", offsetof(AACEncContext, options.ltp), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS}, - {"aac_pred", "AAC-Main prediction", offsetof(AACEncContext, options.pred), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS}, - {"aac_pce", "Forces the use of PCEs", offsetof(AACEncContext, options.pce), AV_OPT_TYPE_BOOL, {.i64 = 0}, -1, 1, AACENC_FLAGS}, - FF_AAC_PROFILE_OPTS - {NULL} -}; - -static const AVClass aacenc_class = { - .class_name = "AAC encoder", - .item_name = av_default_item_name, - .option = aacenc_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -static const FFCodecDefault aac_encode_defaults[] = { - { "b", "0" }, - { NULL } -}; - -const FFCodec ff_aac_encoder = { - .p.name = "aac", - CODEC_LONG_NAME("AAC (Advanced Audio Coding)"), - .p.type = AVMEDIA_TYPE_AUDIO, - .p.id = AV_CODEC_ID_AAC, - .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | - AV_CODEC_CAP_SMALL_LAST_FRAME, - .priv_data_size = sizeof(AACEncContext), - .init = aac_encode_init, - FF_CODEC_ENCODE_CB(aac_encode_frame), - .close = aac_encode_end, - .defaults = aac_encode_defaults, - .p.supported_samplerates = ff_mpeg4audio_sample_rates, - .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - .p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP, - AV_SAMPLE_FMT_NONE }, - .p.priv_class = &aacenc_class, -}; diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/alpha/hpeldsp_alpha.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/alpha/hpeldsp_alpha.h deleted file mode 100644 index 985182c67b8576cf288a2257dca2d31b95499491..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/alpha/hpeldsp_alpha.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_ALPHA_HPELDSP_ALPHA_H -#define AVCODEC_ALPHA_HPELDSP_ALPHA_H - -#include -#include - -void put_pixels_axp_asm(uint8_t *block, const uint8_t *pixels, - ptrdiff_t line_size, int h); - -#endif /* AVCODEC_ALPHA_HPELDSP_ALPHA_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dct.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dct.h deleted file mode 100644 index 0a03e256d1365962cdefeda8c4e0f98c15521a85..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dct.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * (I)DCT Transforms - * Copyright (c) 2009 Peter Ross - * Copyright (c) 2010 Alex Converse - * Copyright (c) 2010 Vitor Sessak - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#if !defined(AVCODEC_DCT_H) && (!defined(FFT_FLOAT) || FFT_FLOAT) -#define AVCODEC_DCT_H - -#include -#include - -#include "rdft.h" - -struct DCTContext { - int nbits; - int inverse; - RDFTContext rdft; - const float *costab; - FFTSample *csc2; - void (*dct_calc)(struct DCTContext *s, FFTSample *data); - void (*dct32)(FFTSample *out, const FFTSample *in); -}; - -/** - * Set up DCT. 
- * @param nbits size of the input array: - * (1 << nbits) for DCT-II, DCT-III and DST-I - * (1 << nbits) + 1 for DCT-I - * - * @note the first element of the input of DST-I is ignored - */ -int ff_dct_init(DCTContext *s, int nbits, enum DCTTransformType type); -void ff_dct_end (DCTContext *s); - -void ff_dct_init_x86(DCTContext *s); - -void ff_fdct_ifast(int16_t *data); -void ff_fdct_ifast248(int16_t *data); -void ff_jpeg_fdct_islow_8(int16_t *data); -void ff_jpeg_fdct_islow_10(int16_t *data); -void ff_fdct248_islow_8(int16_t *data); -void ff_fdct248_islow_10(int16_t *data); - -void ff_j_rev_dct(int16_t *data); -void ff_j_rev_dct4(int16_t *data); -void ff_j_rev_dct2(int16_t *data); -void ff_j_rev_dct1(int16_t *data); -void ff_jref_idct_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block); -void ff_jref_idct_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block); - -#endif /* AVCODEC_DCT_H */ diff --git a/spaces/competitions/aiornot/Dockerfile b/spaces/competitions/aiornot/Dockerfile deleted file mode 100644 index 0afc086eedf9fcd5a42adf6b9682cdb15d73a410..0000000000000000000000000000000000000000 --- a/spaces/competitions/aiornot/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM huggingface/competitions:latest -CMD competitions run \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/8 Ball Pool for PC Windows 7 - The Best Way to Play without BlueStacks.md b/spaces/congsaPfin/Manga-OCR/logs/8 Ball Pool for PC Windows 7 - The Best Way to Play without BlueStacks.md deleted file mode 100644 index 4f9ce0932e9570e44f0f02f964ec6a1c6da222c4..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/8 Ball Pool for PC Windows 7 - The Best Way to Play without BlueStacks.md +++ /dev/null @@ -1,100 +0,0 @@ -
-

8 Ball Pool Game Download for PC Windows 7 Without Bluestacks

-

Introduction

-

Do you love playing pool games online but don't want to use Bluestacks to run them on your PC? If so, you're in luck. In this article, I'll show you how to download and play 8 Ball Pool on your PC Windows 7 without Bluestacks. You'll learn two easy methods that don't require any complicated setup or installation. By the end of this article, you'll be able to enjoy one of the most popular pool games on your computer with a bigger screen and better controls.

-

8 ball pool game download for pc windows 7 without bluestacks


Download Zip ……… https://urlca.com/2uOaSV



-

What is 8 Ball Pool?

-

8 Ball Pool is an online pool game developed by Miniclip.com. It's one of the oldest and most popular pool games on the web, with hundreds of millions of players worldwide. You can play against AI opponents or real players in various modes, such as training, tournaments, and matches. You can also win coins, upgrade your cues, and customize your profile. 8 Ball Pool is a fun and addictive game that will test your skills and strategy.

-

Why play 8 Ball Pool on PC?

-

While 8 Ball Pool is designed for mobile devices, playing it on PC has some advantages. For example, you can:

-
    -
  • Enjoy a larger and clearer view of the table and balls
  • -
  • Use your mouse and keyboard for more precise and comfortable controls
  • -
  • Avoid battery drain, lag, or crashes that may occur on mobile devices
  • -
  • Access more features and options that may not be available on mobile versions
  • -
-

So, how can you play 8 Ball Pool on PC without Bluestacks? Let's find out.

-

How to download 8 Ball Pool for PC without Bluestacks?

-

Bluestacks is a popular Android emulator that allows you to run mobile apps and games on your PC. However, some people may not like Bluestacks for various reasons, such as:

-
    -
  • It takes up a lot of space and resources on your computer
  • -
  • It may slow down your system or cause compatibility issues
  • -
  • It may contain ads or malware that can harm your device
  • -
  • It may not support some apps or games properly
  • -
-

If you're one of those people who want to avoid Bluestacks, don't worry. There are other ways to play 8 Ball Pool on your PC without using it. Here are two methods that I recommend:

-

-

Method 1: Using GameLoop Emulator

-

GameLoop is an Android emulator that is specially designed for gaming. It's developed by Tencent, the same company that created PUBG Mobile and Call of Duty Mobile. GameLoop offers a smooth and fast gaming experience, with high-quality graphics and sound effects. It also supports a wide range of games, including 8 Ball Pool.

-

Step 1: Download and install GameLoop

-

To use GameLoop, you need to download it from its official website. Then, run the exe file to install it on your PC. The installation process is simple and straightforward. Just follow the instructions on the screen and wait for it to finish.

-

Step 2: Search for 8 Ball Pool and install it

-

Once you have GameLoop installed, open it and go to the Game Center tab. There, you'll see a list of games that you can play on your PC. You can also use the search bar to find 8 Ball Pool. Click on the game icon and then click on the Install button. The game will start downloading and installing automatically. You can check the progress on the My Games tab.

-

Step 3: Enjoy playing 8 Ball Pool on PC

-

When the installation is done, you can launch 8 Ball Pool from the My Games tab or from your desktop shortcut. You'll see a login screen where you can choose to sign in with your Facebook, Google, or Miniclip account. You can also play as a guest if you don't have an account. After that, you can start playing 8 Ball Pool on your PC with GameLoop. You can use your mouse to aim and shoot, and your keyboard to chat and adjust settings. You can also customize your controls and preferences from the GameLoop settings menu.

-

Method 2: Using CrazyGames Website

-

If you don't want to download or install anything on your PC, you can also play 8 Ball Pool online on your browser. There are many websites that offer free online games, but one of the best ones is CrazyGames. CrazyGames is a platform that hosts thousands of games from various genres and categories. You can play them for free without any registration or download. One of the games that you can find on CrazyGames is 8 Ball Pool.

-

Step 1: Visit CrazyGames website

-

To play 8 Ball Pool on CrazyGames, you need to visit its website. You can use any browser that supports HTML5, such as Chrome, Firefox, or Edge. Once you're on the website, you'll see a homepage with featured and popular games. You can also browse by category or use the search bar to find 8 Ball Pool.

-

Step 2: Find and play 8 Ball Pool online

-

When you find 8 Ball Pool on CrazyGames, click on it and you'll be taken to its game page. There, you'll see a brief description of the game, some screenshots, and a big Play button. Click on the Play button and wait for the game to load. You may need to allow some permissions or disable your ad blocker for the game to work properly.

-

Step 3: Have fun with 8 Ball Pool on your browser

-

Once the game is loaded, you can start playing 8 Ball Pool online on your browser. You'll see a similar login screen as in the mobile version, where you can sign in with your Facebook, Google, or Miniclip account, or play as a guest. After that, you can choose your mode and opponent, and start playing. You can use your mouse to aim and shoot, and your keyboard to chat and adjust settings. You can also access more features and options from the game menu.

-

Conclusion

-

In this article, I've shown you how to download and play 8 Ball Pool on your PC Windows 7 without Bluestacks. You've learned two easy methods that don't require any complicated setup or installation. You can either use GameLoop emulator or CrazyGames website to enjoy one of the most popular pool games on your computer with a bigger screen and better controls.

-

Now that you know how to play 8 Ball Pool on PC without Bluestacks, what are you waiting for? Grab your cue and challenge your friends or other players online. Show off your skills and strategy, win coins and rewards, and have fun with 8 Ball Pool.

-

If you liked this article, please share it with your friends and leave a comment below. Also, don't forget to check out my other articles on gaming and technology topics. Thanks for reading!

-

Frequently Asked Questions

-
    -
  • Is 8 Ball Pool free to play?
  • -

    Yes, 8 Ball Pool is free to play on both mobile devices and PC. However, there are some in-game items and features that require real money to purchase, such as coins, cash, cues, and premium membership.

    -
  • Can I play 8 Ball Pool offline?
  • -

    No, 8 Ball Pool requires an internet connection to play online with other players or AI opponents. However, you can practice offline in the training mode if you want to improve your skills.

    -
  • Can I transfer my progress from mobile to PC or vice versa?
  • -

    Yes, you can transfer your progress from mobile to PC or vice versa if you sign in with the same account on both platforms. You can use your Facebook, Google, or Miniclip account to sync your data across devices.

    -
  • Is GameLoop safe to use?
  • -

    Yes, GameLoop is safe to use as it is developed by Tencent, a reputable gaming company. It does not contain any viruses or malware that can harm your PC. However, you should always download it from its official website and not from any third-party sources.

    -
  • Is CrazyGames legal to use?
  • -

    Yes, CrazyGames is legal to use as it does not host any pirated or illegal games on its website. It only provides links to games that are free and licensed by their developers. However, you should always respect the terms and conditions of the games and the website when using them.

    -

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Juego pou apk un juego divertido y educativo para nios y adultos.md b/spaces/congsaPfin/Manga-OCR/logs/Juego pou apk un juego divertido y educativo para nios y adultos.md deleted file mode 100644 index 908bd3931c40db6be72a6b11b43e080c01285748..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Juego pou apk un juego divertido y educativo para nios y adultos.md +++ /dev/null @@ -1,162 +0,0 @@ -
-

Juego Pou APK: How to Download and Play the Cute Virtual Pet Game

-

Do you love virtual pet games? Do you want to have a cute and adorable alien creature as your companion? If you answered yes, then you should try Juego Pou APK, a popular game that lets you take care of a lovable Pou.

-

What is Juego Pou APK?

-

A brief introduction to the game and its features

-

Juego Pou APK is a game that was created by Zakeh, a Lebanese developer, in 2012. The game is available for free on Android and iOS devices, and has been downloaded more than 500 million times. The game is also translated into 23 languages, including Spanish, French, German, Italian, Portuguese, Arabic, Chinese, Japanese, Korean, Russian, Turkish and more.

-

juego pou apk


DOWNLOADhttps://urlca.com/2uO9EQ



-

The game is about taking care of a Pou, a brown, oval-shaped alien creature that resembles a potato. Pou is your virtual pet that you can feed, play with, dress up, and watch grow. The game has many features that make it fun and engaging, such as:

-
    -
  • A variety of foods that you can feed Pou, from fruits and vegetables to junk food and sweets.
  • -
  • A game room where you can play different mini-games with Pou and earn coins.
  • -
  • A lab where you can experiment with different potions that can change Pou's color, size, mood, or health.
  • -
  • A wardrobe where you can customize Pou's appearance with different outfits, hats, eyeglasses, and accessories.
  • -
  • A shop where you can buy more items for Pou using the coins you earned.
  • -
  • A gallery where you can take pictures of Pou and share them with your friends.
  • -
  • Achievements and special quests that you can complete to unlock more rewards.
  • -
-

How to download and install Juego Pou APK on your device

-

If you want to play Juego Pou APK on your device, you need to download and install the APK file from a trusted source. An APK file is an Android application package that contains all the files and data needed to run an app on your device. Here are the steps to download and install Juego Pou APK on your device:

-
    -
1. Go to a reliable website that offers Juego Pou APK for download. For example, you can go to Pou ™ - Descargar or Pou - Apps on Google Play.
  2. -
  3. Click on the download button and wait for the APK file to be downloaded on your device.
  4. -
  5. Once the download is complete, locate the APK file on your device and tap on it to open it.
  6. -
  7. You may need to enable the installation of apps from unknown sources on your device settings. To do this, go to Settings > Security > Unknown Sources and toggle it on.
  8. -
  9. Follow the instructions on the screen to install Juego Pou APK on your device.
  10. -
  11. After the installation is done, you can launch the game and start playing with your Pou.
  12. -
-

How to play Juego Pou APK

-

Feed and take care of Pou

-

The first thing you need to do when you start playing Juego Pou APK is to feed and take care of your Pou. You can see four bars at the top of the screen that indicate your Pou's hunger, health, energy, and happiness levels. You need to keep these bars full by feeding your Pou with different foods, giving it medicine when it is sick, putting it to sleep when it is tired, and making it happy by playing with it.

-

You can feed your Pou by tapping on the kitchen icon at the bottom of the screen. You will see a fridge with various foods that you can drag and drop to your Pou's mouth. You can also buy more foods from the shop using your coins. Some foods will make your Pou grow faster, while others will make it dirty or sick. You can clean your Pou by tapping on the bathroom icon and using the soap and water to wash it. You can also give your Pou medicine by tapping on the medicine icon and choosing the appropriate pill or syrup.

-

You can put your Pou to sleep by tapping on the bedroom icon and turning off the lights. Your Pou will sleep until its energy bar is full, or until you wake it up by turning on the lights again. You can also change the wallpaper and the bed of your Pou's bedroom from the shop.

-

You can make your Pou happy by tapping on the game room icon and playing different mini-games with it. You will also earn coins for playing these games, which you can use to buy more items for your Pou. Some of the games you can play are:

-
    -
  • Pou Popper: A game where you have to pop as many bubbles as you can before time runs out.
  • -
  • Food Drop: A game where you have to catch as many falling foods as you can and avoid the bad ones.
  • -
  • Memory: A game where you have to match pairs of cards with the same image.
  • -
  • Sky Jump: A game where you have to jump from platform to platform and avoid falling.
  • -
  • Pou Sounds: A game where you have to repeat the sounds that Pou makes.
  • -
-

Experiment with potions at the lab

-

If you want to have some fun with your Pou, you can tap on the lab icon and experiment with different potions that can change your Pou's color, size, mood, or health. You can buy these potions from the shop using your coins, or you can get them for free by watching ads. Some of the potions you can use are:

-
    -
  • Color Potion: A potion that changes your Pou's color to a random one.
  • -
  • Mini Potion: A potion that shrinks your Pou to a tiny size.
  • -
  • Fat Potion: A potion that makes your Pou fat and round.
  • -
  • Slim Potion: A potion that makes your Pou slim and skinny.
  • -
  • Hungry Potion: A potion that makes your Pou hungry and lowers its hunger bar.
  • -
  • Happy Potion: A potion that makes your Pou happy and raises its happiness bar.
  • -
-

You can also mix two potions together to create a new effect. For example, if you mix a color potion and a mini potion, you will get a mini-colored Pou. Be careful though, some potions may have negative effects on your Pou, such as making it sick or dirty.

-


-

Customize Pou's appearance

-

If you want to make your Pou look unique and stylish, you can tap on the wardrobe icon and customize its appearance with different outfits, hats, eyeglasses, and accessories. You can buy these items from the shop using your coins, or you can unlock them by completing achievements and special quests. Some of the items you can use are:

-
    -
  • Outfits: You can choose from different types of clothes for your Pou, such as shirts, pants, dresses, skirts, suits, costumes, and more.
  • -
  • Hats: You can choose from different types of hats for your Pou, such as caps, beanies, helmets, crowns, tiaras, and more.
  • -
  • Eyeglasses: You can choose from different types of eyeglasses for your Pou, such as sunglasses, goggles, monocles, glasses, and more.
  • -
  • Accessories: You can choose from different types of accessories for your Pou, such as necklaces, earrings, bracelets, rings, bows, ties, scarves, and more.
  • -
-

You can also change your Pou's eye color by tapping on the eye icon and choosing from a variety of colors. You can also change your Pou's mouth shape by tapping on the mouth icon and choosing from different expressions.

-

Tips and tricks for Juego Pou APK

-

How to earn more coins

-

Coins are the currency of Juego Pou APK that you need to buy items for your Pou. There are several ways to earn coins in the game:

-
    -
  • Play mini-games in the game room and collect coins that appear on the screen.
  • -
  • Watch ads in the shop and get free coins for each ad you watch.
  • -
  • Complete achievements and special quests and get coins as rewards.
  • -
  • Visit your friends' Pous and tap on their coins to collect them.
  • -
  • Use the coin multiplier potion in the lab and double your coins for a limited time.
  • -
-

How to unlock new outfits, hats and eyeglasses

-

If you want to have more options to customize your Pou's appearance, you need to unlock new outfits, hats and eyeglasses. There are several ways to unlock them in the game:

-
    -
  • Buy them from the shop using your coins. Some items are more expensive than others, so you need to save up your coins.
  • -
  • Level up your Pou by feeding it, playing with it, and taking care of it. Each time you level up, you will unlock new items for your Pou.
  • -
  • Complete achievements and special quests and get items as rewards. Some achievements and quests are more challenging than others, so you need to work hard to complete them.
  • -
  • Use the mystery box potion in the lab and get a random item for your Pou. You never know what you will get, so it can be a surprise.
  • -
-

How to make Pou happy and healthy

-

If you want to have a happy and healthy Pou, you need to pay attention to its needs and preferences. Here are some tips to make your Pou happy and healthy:

-
    -
  • Feed your Pou with its favorite foods. You can see what foods your Pou likes by tapping on the heart icon next to each food. The more hearts, the more your Pou likes it.
  • -
  • Play with your Pou regularly and keep its happiness bar full. You can see what games your Pou likes by tapping on the star icon next to each game. The more stars, the more your Pou likes it.
  • -
  • Clean your Pou when it is dirty and give it medicine when it is sick. You can see how dirty or sick your Pou is by looking at its face expression. The more sad or angry, the more dirty or sick it is.
  • -
  • Put your Pou to sleep when it is tired and let it rest. You can see how tired your Pou is by looking at its eyes. The more closed or droopy, the more tired it is.
  • -
  • Customize your Pou's appearance with different items that suit its personality. You can see what items your Pou likes by tapping on the smiley icon next to each item. The more smiley, the more your Pou likes it.
  • -
-

Conclusion

-

A summary of the main points and a call to action

-

Juego Pou APK is a fun and addictive game that lets you take care of a cute virtual pet. You can feed it, play with it, dress it up, and watch it grow. You can also experiment with different potions, earn coins, unlock items, complete achievements, and share pictures of your Pou with your friends. Juego Pou APK is a game that will keep you entertained for hours and make you feel like you have a real pet.

-

If you want to download Juego Pou APK on your device, you can follow the steps we mentioned above and install it from a trusted source. You can also visit [Pou ™ - Descargar] or [Pou - Apps on Google Play] for more information about the game and its features.

-

What are you waiting for? Download Juego Pou APK today and start playing with your own Pou. You will love it!

-

Frequently Asked Questions

-

What is the difference between Juego Pou APK and Juego Pou?

-

Juego Pou APK is the name of the APK file that you need to download and install on your device to play Juego Pou. Juego Pou is the name of the game itself that you can play after installing Juego Pou APK.

-

Is Juego Pou APK safe to download?

-

Juego Pou APK is safe to download as long as you get it from a reliable website that offers authentic APK files. However, you should always be careful when downloading any APK file from unknown sources, as some of them may contain viruses or malware that can harm your device.

-

How can I update Juego Pou APK?

-

You can update Juego Pou APK by downloading and installing the latest version of the APK file from the same website that you got it from. Alternatively, you can enable automatic updates on your device settings, so that Juego Pou APK will update itself whenever there is a new version available.

-

How can I delete Juego Pou APK?

-

If you want to delete Juego Pou APK from your device, you can follow these steps:

-
    -
  1. Go to Settings > Apps and find Juego Pou APK on the list of installed apps.
  2. -
  3. Tap on Juego Pou APK and select Uninstall.
  4. -
  5. Confirm your action and wait for the app to be removed from your device.
  6. -
-

How can I contact the developer of Juego Pou APK?

-

If you have any questions, feedback, or suggestions for the developer of Juego Pou APK, you can contact them by using one of these methods:

-
    -
  • Email: pou@zakeh.com
  • -
  • Facebook: [Pou - Home | Facebook]
  • -
  • Twitter: [Pou (@PouAlien) | Twitter]
  • -
-

The developer is very responsive and friendly, and will try to help you with any issues or requests you may have.

-
-
\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Mahjong Game Download for Windows 8 - Enjoy Over 40 Puzzles and Daily Challenges.md b/spaces/congsaPfin/Manga-OCR/logs/Mahjong Game Download for Windows 8 - Enjoy Over 40 Puzzles and Daily Challenges.md deleted file mode 100644 index 81f02f77deac5e73f017f901a741bf9f0abe0df7..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Mahjong Game Download for Windows 8 - Enjoy Over 40 Puzzles and Daily Challenges.md +++ /dev/null @@ -1,139 +0,0 @@ -
-

Mahjong Game Download for Windows 8

-

If you are looking for a fun and relaxing way to spend your free time, you might want to try playing mahjong on your Windows 8 device. Mahjong is a classic tile-matching game that can challenge your mind and soothe your senses. In this article, we will show you how to download mahjong games for Windows 8, how to play them, and how to improve your skills. We will also review some of the best and free mahjong games available on the Microsoft Store.

-

mahjong game download for windows 8


DOWNLOADhttps://urlca.com/2uO6L4



-

What is Mahjong?

-

Mahjong is a solitaire matching game that uses a set of mahjong tiles rather than cards. The tiles are usually arranged in a rectangular layout, with some tiles stacked on top of others. The goal is to remove all the tiles from the board by matching pairs of identical tiles that are free, meaning that they have no other tiles on their left or right sides or on top of them.

-

Mahjong has a long and rich history, dating back to ancient China. It is said that it was invented by Confucius, the famous philosopher, or by a nobleman who was bored in his exile. It was originally played as a four-player game, similar to rummy, but it became popular as a solitaire game in the West in the early 20th century. Today, there are many variations and versions of mahjong, with different rules, tile sets, layouts, and themes.

-

Why Play Mahjong on Windows 8?

-

Playing mahjong on Windows 8 has many benefits and features that make it an enjoyable and rewarding experience. Here are some of them:

-
    -
  • You can play mahjong anytime and anywhere, as long as you have your Windows 8 device with you. You don't need any physical tiles or boards, or any other players.
  • -
  • You can choose from a variety of mahjong games, with different styles, graphics, sounds, and levels of difficulty. You can also customize your own game by changing the tile set, the layout, the background, and other options.
  • -
  • You can improve your cognitive skills, such as memory, concentration, logic, and problem-solving. Mahjong is a brain exercise that can stimulate your mind and keep it sharp.
  • -
  • You can relax and unwind with mahjong. The game has a soothing effect that can help you reduce stress, anxiety, and boredom. The beautiful visuals and calming sounds can create a peaceful atmosphere that can enhance your mood.
  • -
-

How to Download Mahjong Games for Windows 8

-

Downloading mahjong games for Windows 8 is very easy and fast. You just need to follow these simple steps:

-


-
    -
  1. Open the Microsoft Store app on your Windows 8 device. You can find it on your Start screen or by searching for it on your device.
  2. -
  3. Type "mahjong" in the search box and press Enter. You will see a list of mahjong games that are compatible with Windows 8.
  4. -
  5. Select the mahjong game that you want to download and click on it. You will see the game's details, ratings, reviews, screenshots, and system requirements.
  6. -
  7. Click on the "Get" button to start downloading the game. You may need to sign in with your Microsoft account if you haven't already.
  8. -
  9. Wait for the download and installation to complete. You can see the progress on the game's page or on your Downloads list.
  10. -
  11. Once the game is installed, you can launch it from the Start screen or from your Apps list. Enjoy playing mahjong on your Windows 8 device!
  12. -
-

Mahjong Free !

-

One of the most popular and free mahjong games for Windows 8 is Mahjong Free !. This game offers over 1000 boards, 12 backgrounds, and 8 tile sets. You can also create your own boards with the editor mode. The game has four difficulty levels, from easy to expert, and supports touch, mouse, and keyboard controls. You can also track your statistics, achievements, and leaderboards. Mahjong Free ! is a great game for beginners and experts alike.

-

Microsoft Mahjong

-

Another popular and free mahjong game for Windows 8 is Microsoft Mahjong. This game is developed by Microsoft Studios and features stunning graphics, animations, and sounds. You can choose from four different themes: Classic, Adventure, Enchanted Forest, and Underwater World. The game has three difficulty levels, from easy to hard, and offers daily challenges, badges, and achievements. You can also use the hint and undo functions to help you out. Microsoft Mahjong is a fun and relaxing game that will keep you entertained for hours.

-

Mahjong Solitaire (Free)

-

A third popular and free mahjong game for Windows 8 is Mahjong Solitaire (Free). This game has a simple and elegant design that lets you focus on the gameplay. You can choose from over 300 layouts, 6 backgrounds, and 4 tile sets. The game has two difficulty levels, normal and hard, and supports touch, mouse, and keyboard controls. You can also zoom in and out of the board, shuffle the tiles, and save your progress. Mahjong Solitaire (Free) is a classic and addictive game that will challenge your mind.

-

How to Play Mahjong Games on Windows 8

-

Playing mahjong games on Windows 8 is very easy and intuitive. You just need to know some basic rules and tips to get started. Here are some of them:

-

The Basics of Mahjong

-

The basic rules of mahjong are as follows:

-
    -
  • The objective of the game is to clear all the tiles from the board by matching pairs of identical tiles that are free.
  • -
  • A tile is free if it has no other tiles on its left or right sides or on top of it.
  • -
  • You can only match tiles that have the same symbol or category. For example, you can match two bamboo tiles or two dragon tiles, but not a bamboo tile and a dragon tile.
  • -
  • You can select a tile by clicking or tapping on it. If it is free, it will be highlighted. Then you can select another tile to match it with. If they are a valid pair, they will be removed from the board.
  • -
  • If there are no more valid moves left, you can shuffle the tiles or restart the game.
  • -
  • You win the game when you clear all the tiles from the board.
  • -
-

The Different Types of Mahjong Tiles

-

The standard mahjong tile set consists of 144 tiles divided into four categories: suits, honors, flowers, and seasons. Each category has different symbols and meanings. Here are some of them:

- - - - - - -
CategorySymbolsMeaning
SuitsBamboo (1-9), Characters (1-9), Circles (1-9)The most common tiles that represent numbers or ranks
HonorsWinds (East, South, West, North), Dragons (Red, Green, White)The special tiles that represent directions or elements
FlowersPlum, Orchid, Bamboo, ChrysanthemumThe optional tiles that represent the four noble plants
SeasonsSpring, Summer, Autumn, WinterThe optional tiles that represent the four seasons
-

The Different Layouts and Themes of Mahjong Games

-

One of the most interesting aspects of mahjong games is that they can have different layouts and themes that can change the appearance and difficulty of the game. The layout refers to the shape and arrangement of the tiles on the board, while the theme refers to the style and color of the tiles and the background. Here are some examples of layouts and themes:

- - - - - - -
| Layout | Theme | Description |
| --- | --- | --- |
| Turtle | Classic | The traditional layout that resembles a turtle shell. It has 144 tiles and 5 layers. It is one of the most common and balanced layouts. |
| Pyramid | Adventure | A layout that resembles a pyramid. It has 140 tiles and 7 layers. It is a challenging layout that requires careful planning and strategy. |
| Butterfly | Enchanted Forest | A layout that resembles a butterfly. It has 104 tiles and 4 layers. It is a beautiful and easy layout that is suitable for beginners. |
| Fish | Underwater World | A layout that resembles a fish. It has 144 tiles and 5 layers. It is a fun and colorful layout that can brighten up your mood. |
-

How to Improve Your Mahjong Skills on Windows 8

-

If you want to become a mahjong master on Windows 8, you need to practice and improve your skills. Here are some tips and tricks that can help you do that:

-

Use the Hint and Undo Functions

-

Most mahjong games on Windows 8 have two useful functions that can help you out when you are stuck or make a mistake. The hint function can show you a possible move that you can make, while the undo function can let you reverse your last move. You can use these functions by clicking or tapping on the icons on the screen, or by using the keyboard shortcuts (H for hint, U for undo). However, be careful not to overuse these functions, as they may reduce your score or limit your options.

-

Complete the Daily Challenges

-

Some mahjong games on Windows 8 offer daily challenges that can test your skills and reward you with badges and achievements. These challenges are usually based on specific criteria, such as completing a certain layout, finding a certain number of matches, or finishing within a certain time limit. You can access these challenges by clicking or tapping on the calendar icon on the screen, or by using the keyboard shortcut (D for daily challenge). Completing these challenges can help you improve your speed, accuracy, and strategy.

-

Try Different Levels of Difficulty

-

Another way to improve your mahjong skills on Windows 8 is to try different levels of difficulty that can suit your skill level and preference. Most mahjong games on Windows 8 have at least three levels of difficulty: easy, medium, and hard. You can change the level of difficulty by clicking or tapping on the settings icon on the screen, or by using the keyboard shortcut (S for settings). The level of difficulty can affect various factors, such as the number of tiles, the number of shuffles, the time limit, and the score multiplier. Trying different levels of difficulty can help you challenge yourself and adapt to different situations.

-

Conclusion

-

Mahjong is a wonderful game that can provide you with hours of fun and relaxation on your Windows 8 device. You can download mahjong games for Windows 8 from the Microsoft Store for free, and enjoy playing them with different layouts, themes, and levels of difficulty. You can also improve your mahjong skills by using the hint and undo functions, completing the daily challenges, and trying different levels of difficulty. Mahjong is a game that can stimulate your mind and soothe your senses. So what are you waiting for? Download mahjong games for Windows 8 today and start playing!


Frequently Asked Questions (FAQs)


Here are some of the most common questions and answers about mahjong games for Windows 8:

Q: How do I update mahjong games on Windows 8?


A: You can update mahjong games on Windows 8 by following these steps:

1. Open the Microsoft Store app on your Windows 8 device.
2. Click or tap on the user icon on the top right corner of the screen.
3. Select "Downloads and updates" from the drop-down menu.
4. Click or tap on the "Get updates" button to check for available updates.
5. If there are any updates for your mahjong games, they will be downloaded and installed automatically.

Q: How do I uninstall mahjong games on Windows 8?


A: You can uninstall mahjong games on Windows 8 by following these steps:

1. Go to your Start screen or your Apps list and find the mahjong game that you want to uninstall.
2. Right-click or press and hold on the game's tile or icon.
3. Select "Uninstall" from the menu that appears at the bottom of the screen.
4. Confirm your choice by clicking or tapping on "Uninstall" again.
5. The game will be removed from your device and your Microsoft account.

Q: How do I sync my mahjong game progress across different devices?


A: You can sync your mahjong game progress across different devices by using your Microsoft account. You just need to sign in with the same account on all your devices, and your game data will be stored in the cloud. This way, you can resume your game from where you left off, and access your statistics, achievements, and leaderboards on any device.


Q: How do I contact the developers of mahjong games on Windows 8?


A: You can contact the developers of mahjong games on Windows 8 by visiting their websites or social media pages, or by sending them an email. You can find their contact information on the game's page on the Microsoft Store, or by clicking or tapping on the "About" or "Support" buttons within the game. You can also leave a review or a rating for the game on the Microsoft Store, and the developers may respond to your feedback.


Q: How do I learn more about mahjong and its history and culture?


A: You can learn more about mahjong and its history and culture by reading books, articles, blogs, or watching videos about it. You can also join online communities, forums, or groups of mahjong enthusiasts, where you can share your experiences, tips, and questions. You can also try playing mahjong with other players online or offline, and learn from their strategies and skills.

\ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Murder of a White Classmate Movie A Gripping Drama of Race Faith and Family - Download Here.md b/spaces/congsaPfin/Manga-OCR/logs/Murder of a White Classmate Movie A Gripping Drama of Race Faith and Family - Download Here.md deleted file mode 100644 index 31b9844b6c89eb82c6fea4fe43828472061cb545..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Murder of a White Classmate Movie A Gripping Drama of Race Faith and Family - Download Here.md +++ /dev/null @@ -1,118 +0,0 @@ -

Download Murder of a White Classmate Movie: A Gripping Drama Thriller


If you are looking for a movie that will keep you on the edge of your seat, you might want to check out Murder of a White Classmate, a 2018 American drama thriller film directed by Rhyan LaMarr. This movie tells the story of a young black man who is wrongfully accused of killing his white classmate, and his father who fights for his son's vindication in court. In this article, we will tell you what this movie is about, where and how you can download it, and why you should watch it.


What is Murder of a White Classmate Movie About?


Murder of a White Classmate is based on the novel Canal Street by Adam Key and Jon Knitter. It was filmed in Chicago and premiered at the American Black Film Festival in 2018. It was distributed by Smith Global Media and opened over Martin Luther King Jr. Day weekend in 2019.






The Plot


The movie follows Kholi Styles (Bryshere Y. Gray), a black teenager who transfers to a predominantly white high school in Winnetka, Illinois. He becomes friends with Brian Sudermill (Kevin Quinn), a white classmate who shares his passion for music. However, their friendship is tested when Brian is found dead in an alley, and Kholi becomes the prime suspect. Kholi's father, Jackie Styles (Mykelti Williamson), is an up-and-coming lawyer who takes on his son's case, determined to prove his innocence. He faces an uphill battle against the ambitious District Attorney Canton (Mekhi Phifer), who is eager to convict Kholi and boost his political career. As the trial unfolds, Kholi and Jackie must deal with racial tensions, media scrutiny, and personal challenges, while holding on to their faith in God and each other.


The Cast


The movie features an ensemble cast of talented actors, including Lance Reddick as Jerry Shaw, Kholi's mentor and Jackie's friend; Michael Beach as Ronald Morgan, Jackie's boss and rival; Jamie Hector as Pastor Sam Billings, Kholi's spiritual guide; Jon Seda as Detective Mike Watts, the lead investigator; Will Yun Lee as Officer Hank Chu, Watts' partner; Harry Lennix as DJ Terrance Palmer, a radio host who covers the case; Nora Dunn as Marge Sudermill, Brian's mother; William R. Moses as Bill Sudermill, Brian's father; Woody McClain as MayMay, Kholi's friend; and DeVon Franklin as The Key Note, a motivational speaker.


The Themes


The movie explores various themes that are relevant to today's society, such as race relations, justice system, media influence, family dynamics, peer pressure, faith and forgiveness. It challenges the viewers to question their own biases and assumptions, and to empathize with different perspectives. It also inspires the viewers to stand up for what they believe in, and to trust in God's plan.


Where and How to Download Murder of a White Classmate Movie?


If you are interested in watching Murder of a White Classmate, you have several options to download it online. Here are some of them:


Streaming Platforms


The easiest and safest way to download the movie is through streaming platforms that offer legal and high-quality downloads. Some of these platforms are:

- Amazon Prime Video: You can rent or buy the movie in HD or SD format, or watch it for free if you have a Prime membership.
- iTunes: You can rent or buy the movie in HD or SD format, or watch it for free if you have an Apple TV+ subscription.
- Google Play: You can rent or buy the movie in HD or SD format, or watch it for free if you have a YouTube Premium subscription.
- Vudu: You can rent or buy the movie in HDX, HD, or SD format, or watch it for free with ads if you have a Vudu account.

To download the movie from these platforms, you need to have an account and a compatible device. You can also choose to stream the movie online without downloading it.


Torrent Sites


Another option to download the movie is through torrent sites that offer peer-to-peer file sharing. Some of these sites are:


- The Pirate Bay: This is one of the most popular and controversial torrent sites in the world. It has a large collection of movies, TV shows, music, games, and other content. However, it is also blocked in many countries and regions due to legal issues.
- RARBG: This is another well-known torrent site that provides high-quality torrents and magnet links. It has a user-friendly interface and a loyal community of users. However, it is also banned in some countries and regions due to copyright infringement.
- 1337x: This is a torrent site that has a sleek design and a variety of categories. It offers movies, TV shows, music, games, software, and more. However, it is also blocked by some internet service providers and governments due to piracy concerns.
- YTS: This is a torrent site that specializes in movies. It offers high-quality and low-size torrents that are easy to download and watch. However, it is also sued by several movie studios and producers due to illegal distribution.

To download the movie from these sites, you need to have a torrent client and a VPN service. You also need to be careful of malware and viruses that may infect your device.


Legal and Ethical Issues


Before you decide to download Murder of a White Classmate from any source, you should be aware of the legal and ethical issues involved. Downloading movies without paying for them is considered piracy, which is illegal in most countries and regions. You may face fines, lawsuits, or even jail time if you are caught downloading pirated content. Moreover, downloading movies without the consent of the creators and distributors is unethical, as it deprives them of their rightful income and recognition. You may also harm the quality and diversity of the movie industry by supporting piracy.


Why You Should Watch Murder of a White Classmate Movie?


If you are still not convinced that Murder of a White Classmate is worth watching, here are some reasons why you should give it a try:


The Reviews


The movie has received positive reviews from critics and audiences alike. It has a rating of 7.1 out of 10 on IMDb, 4 out of 5 on Common Sense Media, and 86% on Rotten Tomatoes. Some of the praise for the movie:

"A powerful film that tackles timely issues with sensitivity and nuance." - Roger Ebert
"A gripping drama that explores the complexities of race, justice, and faith in America." - Essence
"A compelling story that showcases the talents of its diverse cast and crew." - The Hollywood Reporter

The Awards


The movie has also won several awards and nominations for its excellence in filmmaking. Some of these awards are:

| Award | Category | Recipient | Result |
| --- | --- | --- | --- |
| American Black Film Festival | Best Narrative Feature | Rhyan LaMarr (director) | Nominated |
| Black Reel Awards | Outstanding Independent Feature | Rhyan LaMarr (director) | Won |
| Black Reel Awards | Outstanding Actor, Independent Feature | Bryshere Y. Gray (actor) | Nominated |
| Black Reel Awards | Outstanding Supporting Actor, Independent Feature | Mykelti Williamson (actor) | Nominated |
| Black Reel Awards | Outstanding Supporting Actress, Independent Feature | Juani Feliz (actress) | Nominated |
| Black Reel Awards | Outstanding Screenplay, Independent Feature | Rhyan LaMarr, Adam Key, Jon Knitter (writers) | Nominated |
| Pan African Film Festival | Audience Award, Narrative Feature | Rhyan LaMarr (director) | Won |
| Pan African Film Festival | Programmers' Award, Narrative Feature | Rhyan LaMarr (director) | Won |
| Pan African Film Festival | Jury Award, Narrative Feature | Rhyan LaMarr (director) | Nominated |
| Santa Barbara International Film Festival | Social Justice Award for Documentary Film | Rhyan LaMarr (director) | Nominated |
| Stellar Awards | Best Gospel/Inspirational Song | "Canal Street" by Ta'Rhonda Jones and Sir The Baptist (song) | Nominated |

These awards show that the movie is recognized and appreciated by the industry and the audience for its artistic merit and social impact.


The Message


The movie is more than just a drama thriller. It is also a message of hope, courage, and love. It shows how a father and a son can overcome the odds and fight for their rights and dignity. It shows how a community can come together and support each other in times of crisis. It shows how a nation can heal and grow from its wounds and divisions. It shows how God can work in mysterious ways and bring justice and peace to His people.


Conclusion


Murder of a White Classmate is a movie that you should not miss. It is a gripping drama thriller that will keep you hooked from start to finish. It is also a movie that will make you think, feel, and act. It is a movie that will challenge you to look beyond the surface and see the truth. It is a movie that will inspire you to stand up for what is right and trust in God's plan.


If you want to watch this movie, you can download it from various sources online. However, you should be aware of the legal and ethical issues involved in downloading movies without paying for them. You should respect the rights and efforts of the creators and distributors of the movie, and support them by paying for their work.


We hope this article has given you some useful information about Murder of a White Classmate. If you have any questions or comments, feel free to leave them below. Thank you for reading!


FAQs

- Q: When was Murder of a White Classmate released?
- A: The movie was released on January 18, 2019 in the United States.
- Q: Who wrote the novel Canal Street that inspired the movie?
- A: The novel was written by Adam Key and Jon Knitter, who also co-wrote the screenplay for the movie.
- Q: How long is Murder of a White Classmate?
- A: The movie has a runtime of 89 minutes.
- Q: What is the rating of Murder of a White Classmate?
- A: The movie is rated PG-13 for thematic elements, violence and suggestive material, drug content and language.
- Q: Where can I find more information about Murder of a White Classmate?
- A: You can visit the official website, the IMDb page, or the Facebook page of the movie for more information.

\ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Devexpress Dll Files Download.md b/spaces/contluForse/HuggingGPT/assets/Devexpress Dll Files Download.md deleted file mode 100644 index 52145fa8267df88481732e68e82820a405820826..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Devexpress Dll Files Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

Devexpress Dll Files Download





Net Programming Language And Visual Studio Editor. file will contain lines in each: item_enum_name ... The application is shipped with the DevExpress trial package. In early ... Click the Launch button to run the RadioButton Demo using Java™ Web Start (download JDK 7 or later). ... NET DLL which returns a DataTable.

diff --git a/spaces/contluForse/HuggingGPT/assets/Download The Golden Compass for Free A Torrent Guide for Fans of the His Dark Materials Trilogy.md b/spaces/contluForse/HuggingGPT/assets/Download The Golden Compass for Free A Torrent Guide for Fans of the His Dark Materials Trilogy.md deleted file mode 100644 index c92b78c952ae96c4b3449293948bedd30e649e6e..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Download The Golden Compass for Free A Torrent Guide for Fans of the His Dark Materials Trilogy.md +++ /dev/null @@ -1,11 +0,0 @@ - -

The seven volumes span Sousa's entire march-writing career, from 1873-1932, and offer free resources for 129 marches. Modern recordings, historical information (courtesy of Paul E. Bierley, author of The Works of John Philip Sousa), as well as full-band scores and sheet music for marches that are in the public domain, are all available for public use as a result of this multi-year project. The volumes are available for free download exclusively on the Marine Band website.


Welcome to MovieMora.com at its new address. Bookmark the URL, because you no longer need to search anywhere else to freely watch and download the movie The Golden Compass. Direct link for downloading or streaming the movie The Golden Compass online on your mobile phone or laptop.






People love free steam games, no doubt. But what many people hate is downloading so many parts and trying to install them on their own. This is why we are the only site that pre-installs every game for you. We have many categories like shooters, action, racing, simulators and even VR games! We strive to satisfy our users and ask for nothing in return. We revolutionized the downloading scene and will continue being your #1 site for free games.


Only a drop of water, but it was the end of May; the snows would be melting, and before long millions of such drops would have formed and run together to make trickling rivulets coursing along the snow; these would soon grow into rushing torrents, and the snow would fall away, and he would be free.


Standard Ebooks is a volunteer-driven project that produces ebook editions of public domain literature using modern typography, technology, and editorial standards, and distributes them free of cost. You can download this and other ebooks carefully produced for true book lovers at standardebooks.org.


Watch online streaming and download the movie The Golden Compass 2007 BluRay 480p & 720p mp4 mkv, Hindi dubbed, English sub, Indonesian sub; watch the film The Golden Compass 2007 streaming online, full HD movies, free movie download via google drive, openload, uptobox, upfile, mediafire direct link download on index movies, world4ufree, bolly4u, downloadhub, tamilrockers, rarbg, torrent, yify, eztv, erosnow, mkvcage, pahe.in, ganool, filmywap, bioskopkeren, layarkaca21, indoxxi, dunia21, Lk21, 123movies, 300mbfilms, subscene, 300mb movies, Tv21, Televisi21, 9xmovie, khatrimaza, moviesbaba, hdmovie8, mkv movies king, mkvmoviesking, Mkvking, Mkvking.com.


\ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/oneformer_model.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/oneformer_model.py deleted file mode 100644 index 8bb18a85a8ecdfa6a7bef912bd6eb038e79e5251..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/oneformer/oneformer_model.py +++ /dev/null @@ -1,470 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/maskformer_model.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -from typing import Tuple - -import torch -from torch import nn -from torch.nn import functional as F - -from annotator.oneformer.detectron2.config import configurable -from annotator.oneformer.detectron2.data import MetadataCatalog -from annotator.oneformer.detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head -from annotator.oneformer.detectron2.modeling.backbone import Backbone -from annotator.oneformer.detectron2.modeling.postprocessing import sem_seg_postprocess -from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, BitMasks -from annotator.oneformer.detectron2.utils.memory import retry_if_cuda_oom - -from .modeling.matcher import HungarianMatcher -from einops import rearrange -from .modeling.transformer_decoder.text_transformer import TextTransformer -from .modeling.transformer_decoder.oneformer_transformer_decoder import MLP -from annotator.oneformer.oneformer.data.tokenizer import SimpleTokenizer, Tokenize - -@META_ARCH_REGISTRY.register() -class OneFormer(nn.Module): - """ - Main class for mask classification semantic segmentation architectures. - """ - - @configurable - def __init__( - self, - *, - backbone: Backbone, - sem_seg_head: nn.Module, - task_mlp: nn.Module, - text_encoder: nn.Module, - text_projector: nn.Module, - prompt_ctx: nn.Embedding, - num_queries: int, - object_mask_threshold: float, - overlap_threshold: float, - metadata, - size_divisibility: int, - sem_seg_postprocess_before_inference: bool, - pixel_mean: Tuple[float], - pixel_std: Tuple[float], - # inference - semantic_on: bool, - panoptic_on: bool, - instance_on: bool, - detection_on: bool, - test_topk_per_image: int, - task_seq_len: int, - max_seq_len: int, - is_demo: bool, - ): - """ - Args: - backbone: a backbone module, must follow detectron2's backbone interface - sem_seg_head: a module that predicts semantic segmentation from backbone features - criterion: a module that defines the loss - num_queries: int, number of queries - object_mask_threshold: float, threshold to filter query based on classification score - for panoptic segmentation inference - overlap_threshold: overlap threshold used in general inference for panoptic segmentation - metadata: dataset meta, get `thing` and `stuff` category names for panoptic - segmentation inference - size_divisibility: Some backbones require the input height and width to be divisible by a - specific integer. We can use this to override such requirement. - sem_seg_postprocess_before_inference: whether to resize the prediction back - to original input size before semantic segmentation inference or after. 
- For high-resolution dataset like Mapillary, resizing predictions before - inference will cause OOM error. - pixel_mean, pixel_std: list or tuple with #channels element, representing - the per-channel mean and std to be used to normalize the input image - semantic_on: bool, whether to output semantic segmentation prediction - instance_on: bool, whether to output instance segmentation prediction - panoptic_on: bool, whether to output panoptic segmentation prediction - test_topk_per_image: int, instance segmentation parameter, keep topk instances per image - """ - super().__init__() - self.backbone = backbone - self.sem_seg_head = sem_seg_head - self.task_mlp = task_mlp - self.text_encoder = text_encoder - self.text_projector = text_projector - self.prompt_ctx = prompt_ctx - self.num_queries = num_queries - self.overlap_threshold = overlap_threshold - self.object_mask_threshold = object_mask_threshold - self.metadata = metadata - if size_divisibility < 0: - # use backbone size_divisibility if not set - size_divisibility = self.backbone.size_divisibility - self.size_divisibility = size_divisibility - self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference - self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) - self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) - - # additional args - self.semantic_on = semantic_on - self.instance_on = instance_on - self.panoptic_on = panoptic_on - self.detection_on = detection_on - self.test_topk_per_image = test_topk_per_image - - self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len) - self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) - self.is_demo = is_demo - - self.thing_indices = [k for k in self.metadata.thing_dataset_id_to_contiguous_id.keys()] - - if not self.semantic_on: - assert self.sem_seg_postprocess_before_inference - - @classmethod - def from_config(cls, cfg): - backbone = build_backbone(cfg) - sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) - - if cfg.MODEL.IS_TRAIN: - text_encoder = TextTransformer(context_length=cfg.MODEL.TEXT_ENCODER.CONTEXT_LENGTH, - width=cfg.MODEL.TEXT_ENCODER.WIDTH, - layers=cfg.MODEL.TEXT_ENCODER.NUM_LAYERS, - vocab_size=cfg.MODEL.TEXT_ENCODER.VOCAB_SIZE) - text_projector = MLP(text_encoder.width, cfg.MODEL.ONE_FORMER.HIDDEN_DIM, - cfg.MODEL.ONE_FORMER.HIDDEN_DIM, cfg.MODEL.TEXT_ENCODER.PROJ_NUM_LAYERS) - if cfg.MODEL.TEXT_ENCODER.N_CTX > 0: - prompt_ctx = nn.Embedding(cfg.MODEL.TEXT_ENCODER.N_CTX, cfg.MODEL.TEXT_ENCODER.WIDTH) - else: - prompt_ctx = None - else: - text_encoder = None - text_projector = None - prompt_ctx = None - - task_mlp = MLP(cfg.INPUT.TASK_SEQ_LEN, cfg.MODEL.ONE_FORMER.HIDDEN_DIM, - cfg.MODEL.ONE_FORMER.HIDDEN_DIM, 2) - - # Loss parameters: - deep_supervision = cfg.MODEL.ONE_FORMER.DEEP_SUPERVISION - no_object_weight = cfg.MODEL.ONE_FORMER.NO_OBJECT_WEIGHT - - # loss weights - class_weight = cfg.MODEL.ONE_FORMER.CLASS_WEIGHT - dice_weight = cfg.MODEL.ONE_FORMER.DICE_WEIGHT - mask_weight = cfg.MODEL.ONE_FORMER.MASK_WEIGHT - contrastive_weight = cfg.MODEL.ONE_FORMER.CONTRASTIVE_WEIGHT - - # building criterion - matcher = HungarianMatcher( - cost_class=class_weight, - cost_mask=mask_weight, - cost_dice=dice_weight, - num_points=cfg.MODEL.ONE_FORMER.TRAIN_NUM_POINTS, - ) - - weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, - "loss_dice": dice_weight, "loss_contrastive": contrastive_weight} - - - if deep_supervision: - dec_layers = 
cfg.MODEL.ONE_FORMER.DEC_LAYERS - aux_weight_dict = {} - for i in range(dec_layers - 1): - aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - - losses = ["labels", "masks", "contrastive"] - - return { - "backbone": backbone, - "sem_seg_head": sem_seg_head, - "task_mlp": task_mlp, - "prompt_ctx": prompt_ctx, - "text_encoder": text_encoder, - "text_projector": text_projector, - "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES, - "object_mask_threshold": cfg.MODEL.TEST.OBJECT_MASK_THRESHOLD, - "overlap_threshold": cfg.MODEL.TEST.OVERLAP_THRESHOLD, - "metadata": MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), - "size_divisibility": cfg.MODEL.ONE_FORMER.SIZE_DIVISIBILITY, - "sem_seg_postprocess_before_inference": ( - cfg.MODEL.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE - or cfg.MODEL.TEST.PANOPTIC_ON - or cfg.MODEL.TEST.INSTANCE_ON - ), - "pixel_mean": cfg.MODEL.PIXEL_MEAN, - "pixel_std": cfg.MODEL.PIXEL_STD, - # inference - "semantic_on": cfg.MODEL.TEST.SEMANTIC_ON, - "instance_on": cfg.MODEL.TEST.INSTANCE_ON, - "panoptic_on": cfg.MODEL.TEST.PANOPTIC_ON, - "detection_on": cfg.MODEL.TEST.DETECTION_ON, - "test_topk_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, - "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, - "max_seq_len": cfg.INPUT.MAX_SEQ_LEN, - "is_demo": cfg.MODEL.IS_DEMO, - } - - @property - def device(self): - return self.pixel_mean.device - - def encode_text(self, text): - assert text.ndim in [2, 3], text.ndim - b = text.shape[0] - squeeze_dim = False - num_text = 1 - if text.ndim == 3: - num_text = text.shape[1] - text = rearrange(text, 'b n l -> (b n) l', n=num_text) - squeeze_dim = True - - # [B, C] - x = self.text_encoder(text) - - text_x = self.text_projector(x) - - if squeeze_dim: - text_x = rearrange(text_x, '(b n) c -> b n c', n=num_text) - if self.prompt_ctx is not None: - text_ctx = self.prompt_ctx.weight.unsqueeze(0).repeat(text_x.shape[0], 1, 1) - text_x = torch.cat([text_x, text_ctx], dim=1) - - return {"texts": text_x} - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper`. - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - * "image": Tensor, image in (C, H, W) format. - * "instances": per-region ground truth - * Other information that's included in the original dicts, such as: - "height", "width" (int): the output resolution of the model (may be different - from input resolution), used in inference. - Returns: - list[dict]: - each dict has the results for one image. The dict contains the following keys: - * "sem_seg": - A Tensor that represents the - per-pixel segmentation prediced by the head. - The prediction has shape KxHxW that represents the logits of - each class for each pixel. - * "panoptic_seg": - A tuple that represent panoptic output - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. - segments_info (list[dict]): Describe each segment in `panoptic_seg`. - Each dict contains keys "id", "category_id", "isthing". 
- """ - images = [x["image"].to(self.device) for x in batched_inputs] - images = [(x - self.pixel_mean) / self.pixel_std for x in images] - images = ImageList.from_tensors(images, self.size_divisibility) - - tasks = torch.cat([self.task_tokenizer(x["task"]).to(self.device).unsqueeze(0) for x in batched_inputs], dim=0) - tasks = self.task_mlp(tasks.float()) - - features = self.backbone(images.tensor) - outputs = self.sem_seg_head(features, tasks) - - if self.training: - texts = torch.cat([self.text_tokenizer(x["text"]).to(self.device).unsqueeze(0) for x in batched_inputs], dim=0) - texts_x = self.encode_text(texts) - - outputs = {**outputs, **texts_x} - - # mask classification target - if "instances" in batched_inputs[0]: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - targets = self.prepare_targets(gt_instances, images) - else: - targets = None - - # bipartite matching-based loss - losses = self.criterion(outputs, targets) - - for k in list(losses.keys()): - if k in self.criterion.weight_dict: - losses[k] *= self.criterion.weight_dict[k] - else: - # remove this loss if not specified in `weight_dict` - losses.pop(k) - return losses - else: - mask_cls_results = outputs["pred_logits"] - mask_pred_results = outputs["pred_masks"] - # upsample masks - mask_pred_results = F.interpolate( - mask_pred_results, - size=(images.tensor.shape[-2], images.tensor.shape[-1]), - mode="bilinear", - align_corners=False, - ) - - del outputs - - processed_results = [] - for i, data in enumerate(zip( - mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes - )): - mask_cls_result, mask_pred_result, input_per_image, image_size = data - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - processed_results.append({}) - - if self.sem_seg_postprocess_before_inference: - mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)( - mask_pred_result, image_size, height, width - ) - mask_cls_result = mask_cls_result.to(mask_pred_result) - - # semantic segmentation inference - if self.semantic_on: - r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result) - if not self.sem_seg_postprocess_before_inference: - r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width) - processed_results[-1]["sem_seg"] = r - - # panoptic segmentation inference - if self.panoptic_on: - panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["panoptic_seg"] = panoptic_r - - # instance segmentation inference - if self.instance_on: - instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["instances"] = instance_r - - if self.detection_on: - bbox_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result) - processed_results[-1]["box_instances"] = bbox_r - - return processed_results - - def prepare_targets(self, targets, images): - h_pad, w_pad = images.tensor.shape[-2:] - new_targets = [] - for targets_per_image in targets: - # pad gt - gt_masks = targets_per_image.gt_masks - padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device) - padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks - new_targets.append( - { - "labels": targets_per_image.gt_classes, - "masks": padded_masks, - } - ) - return new_targets - - def semantic_inference(self, mask_cls, mask_pred): - mask_cls = F.softmax(mask_cls, 
dim=-1)[..., :-1] - mask_pred = mask_pred.sigmoid() - semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred) - return semseg - - def panoptic_inference(self, mask_cls, mask_pred): - scores, labels = F.softmax(mask_cls, dim=-1).max(-1) - mask_pred = mask_pred.sigmoid() - - keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold) - cur_scores = scores[keep] - cur_classes = labels[keep] - cur_masks = mask_pred[keep] - cur_mask_cls = mask_cls[keep] - cur_mask_cls = cur_mask_cls[:, :-1] - - cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks - - h, w = cur_masks.shape[-2:] - panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device) - segments_info = [] - - current_segment_id = 0 - - if cur_masks.shape[0] == 0: - # We didn't detect any mask :( - return panoptic_seg, segments_info - else: - # take argmax - cur_mask_ids = cur_prob_masks.argmax(0) - stuff_memory_list = {} - for k in range(cur_classes.shape[0]): - pred_class = cur_classes[k].item() - isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values() - mask_area = (cur_mask_ids == k).sum().item() - original_area = (cur_masks[k] >= 0.5).sum().item() - mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) - - if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: - if mask_area / original_area < self.overlap_threshold: - continue - - # merge stuff regions - if not isthing: - if int(pred_class) in stuff_memory_list.keys(): - panoptic_seg[mask] = stuff_memory_list[int(pred_class)] - continue - else: - stuff_memory_list[int(pred_class)] = current_segment_id + 1 - - current_segment_id += 1 - panoptic_seg[mask] = current_segment_id - - segments_info.append( - { - "id": current_segment_id, - "isthing": bool(isthing), - "category_id": int(pred_class), - } - ) - - return panoptic_seg, segments_info - - def instance_inference(self, mask_cls, mask_pred): - # mask_pred is already processed to have the same shape as original input - image_size = mask_pred.shape[-2:] - - # [Q, K] - scores = F.softmax(mask_cls, dim=-1)[:, :-1] - labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) - - # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) - scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False) - labels_per_image = labels[topk_indices] - - topk_indices = topk_indices // self.sem_seg_head.num_classes - # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) - mask_pred = mask_pred[topk_indices] - - # Only consider scores with confidence over [self.object_mask_threshold] for demo - if self.is_demo: - keep = scores_per_image > self.object_mask_threshold - scores_per_image = scores_per_image[keep] - labels_per_image = labels_per_image[keep] - mask_pred = mask_pred[keep] - - # if this is panoptic segmentation, we only keep the "thing" classes - if self.panoptic_on: - keep = torch.zeros_like(scores_per_image).bool() - for i, lab in enumerate(labels_per_image): - keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values() - - scores_per_image = scores_per_image[keep] - labels_per_image = labels_per_image[keep] - mask_pred = mask_pred[keep] - - if 'ade20k' in self.metadata.name: - for i in range(labels_per_image.shape[0]): - labels_per_image[i] = self.thing_indices.index(labels_per_image[i].item()) - - result = Instances(image_size) - # mask (before sigmoid) - 
result.pred_masks = (mask_pred > 0).float() - if self.detection_on: - # Uncomment the following to get boxes from masks (this is slow) - result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes() - else: - result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4)) - - # calculate average mask prob - mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6) - result.scores = scores_per_image * mask_scores_per_image - result.pred_classes = labels_per_image - return result \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedEfficientNet.java b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedEfficientNet.java deleted file mode 100644 index d0d62f58d18333b6360ec30a4c85c9f1d38955ce..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedEfficientNet.java +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -package org.tensorflow.lite.examples.classification.tflite; - -import android.app.Activity; -import java.io.IOException; -import org.tensorflow.lite.support.common.TensorOperator; -import org.tensorflow.lite.support.common.ops.NormalizeOp; - -/** This TensorFlow Lite classifier works with the quantized EfficientNet model. */ -public class ClassifierQuantizedEfficientNet extends Classifier { - - /** - * The quantized model does not require normalization, thus set mean as 0.0f, and std as 1.0f to - * bypass the normalization. - */ - private static final float IMAGE_MEAN = 0.0f; - - private static final float IMAGE_STD = 1.0f; - - /** Quantized MobileNet requires additional dequantization to the output probability. */ - private static final float PROBABILITY_MEAN = 0.0f; - - private static final float PROBABILITY_STD = 255.0f; - - /** - * Initializes a {@code ClassifierQuantizedMobileNet}. - * - * @param activity - */ - public ClassifierQuantizedEfficientNet(Activity activity, Device device, int numThreads) - throws IOException { - super(activity, device, numThreads); - } - - @Override - protected String getModelPath() { - // you can download this file from - // see build.gradle for where to obtain this file. It should be auto - // downloaded into assets. 
- return "model_quant.tflite"; - } - - @Override - protected String getLabelPath() { - return "labels_without_background.txt"; - } - - @Override - protected TensorOperator getPreprocessNormalizeOp() { - return new NormalizeOp(IMAGE_MEAN, IMAGE_STD); - } - - @Override - protected TensorOperator getPostprocessNormalizeOp() { - return new NormalizeOp(PROBABILITY_MEAN, PROBABILITY_STD); - } -} diff --git a/spaces/cymic/Waifu_Diffusion_Webui/modules/textual_inversion/preprocess.py b/spaces/cymic/Waifu_Diffusion_Webui/modules/textual_inversion/preprocess.py deleted file mode 100644 index 5f9835f43955674e18035d021b92ea239c12a569..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/modules/textual_inversion/preprocess.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -from PIL import Image, ImageOps -import platform -import sys -import tqdm - -from modules import shared, images - - -def preprocess(process_src, process_dst, process_flip, process_split, process_caption): - size = 512 - src = os.path.abspath(process_src) - dst = os.path.abspath(process_dst) - - assert src != dst, 'same directory specified as source and destination' - - os.makedirs(dst, exist_ok=True) - - files = os.listdir(src) - - shared.state.textinfo = "Preprocessing..." - shared.state.job_count = len(files) - - if process_caption: - shared.interrogator.load() - - def save_pic_with_caption(image, index): - if process_caption: - caption = "-" + shared.interrogator.generate_caption(image) - caption = sanitize_caption(os.path.join(dst, f"{index:05}-{subindex[0]}"), caption, ".png") - else: - caption = filename - caption = os.path.splitext(caption)[0] - caption = os.path.basename(caption) - - image.save(os.path.join(dst, f"{index:05}-{subindex[0]}{caption}.png")) - subindex[0] += 1 - - def save_pic(image, index): - save_pic_with_caption(image, index) - - if process_flip: - save_pic_with_caption(ImageOps.mirror(image), index) - - for index, imagefile in enumerate(tqdm.tqdm(files)): - subindex = [0] - filename = os.path.join(src, imagefile) - img = Image.open(filename).convert("RGB") - - if shared.state.interrupted: - break - - ratio = img.height / img.width - is_tall = ratio > 1.35 - is_wide = ratio < 1 / 1.35 - - if process_split and is_tall: - img = img.resize((size, size * img.height // img.width)) - - top = img.crop((0, 0, size, size)) - save_pic(top, index) - - bot = img.crop((0, img.height - size, size, img.height)) - save_pic(bot, index) - elif process_split and is_wide: - img = img.resize((size * img.width // img.height, size)) - - left = img.crop((0, 0, size, size)) - save_pic(left, index) - - right = img.crop((img.width - size, 0, img.width, size)) - save_pic(right, index) - else: - img = images.resize_image(1, img, size, size) - save_pic(img, index) - - shared.state.nextjob() - - if process_caption: - shared.interrogator.send_blip_to_ram() - -def sanitize_caption(base_path, original_caption, suffix): - operating_system = platform.system().lower() - if (operating_system == "windows"): - invalid_path_characters = "\\/:*?\"<>|" - max_path_length = 259 - else: - invalid_path_characters = "/" #linux/macos - max_path_length = 1023 - caption = original_caption - for invalid_character in invalid_path_characters: - caption = caption.replace(invalid_character, "") - fixed_path_length = len(base_path) + len(suffix) - if fixed_path_length + len(caption) <= max_path_length: - return caption - caption_tokens = caption.split() - new_caption = "" - for token in caption_tokens: - last_caption = new_caption - 
new_caption = new_caption + token + " " - if (len(new_caption) + fixed_path_length - 1 > max_path_length): - break - print(f"\nPath will be too long. Truncated caption: {original_caption}\nto: {last_caption}", file=sys.stderr) - return last_caption.strip() diff --git a/spaces/cynika/taffy/utils.py b/spaces/cynika/taffy/utils.py deleted file mode 100644 index 3733a75111dc89cefa333b34933ae01623550ea7..0000000000000000000000000000000000000000 --- a/spaces/cynika/taffy/utils.py +++ /dev/null @@ -1,338 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess - -import librosa -import numpy as np -import torchaudio -from scipy.io.wavfile import read -import torch -import torchvision -from torch.nn import functional as F -from commons import sequence_mask -from hubert import hubert_model -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - -f0_bin = 256 -f0_max = 1100.0 -f0_min = 50.0 -f0_mel_min = 1127 * np.log(1 + f0_min / 700) -f0_mel_max = 1127 * np.log(1 + f0_max / 700) - -def f0_to_coarse(f0): - is_torch = isinstance(f0, torch.Tensor) - f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1 - - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1 - f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int) - assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min()) - return f0_coarse - - -def get_hubert_model(rank=None): - - hubert_soft = hubert_model.hubert_soft("hubert/hubert-soft-0d54a1f4.pt") - if rank is not None: - hubert_soft = hubert_soft.cuda(rank) - return hubert_soft - -def get_hubert_content(hmodel, y=None, path=None): - if path is not None: - source, sr = torchaudio.load(path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - else: - source = y - source = source.unsqueeze(0) - with torch.inference_mode(): - units = hmodel.units(source) - return units.transpose(1,2) - - -def get_content(cmodel, y): - with torch.no_grad(): - c = cmodel.extract_features(y.squeeze(1))[0] - c = c.transpose(1, 2) - return c - - - -def transform(mel, height): # 68-92 - #r = np.random.random() - #rate = r * 0.3 + 0.85 # 0.85-1.15 - #height = int(mel.size(-2) * rate) - tgt = torchvision.transforms.functional.resize(mel, (height, mel.size(-1))) - if height >= mel.size(-2): - return tgt[:, :mel.size(-2), :] - else: - silence = tgt[:,-1:,:].repeat(1,mel.size(-2)-height,1) - silence += torch.randn_like(silence) / 10 - return torch.cat((tgt, silence), 1) - - -def stretch(mel, width): # 0.5-2 - return torchvision.transforms.functional.resize(mel, (mel.size(-2), width)) - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if iteration is None: - iteration = 1 - if learning_rate is None: - learning_rate = 0.0002 - if optimizer is not None and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = 
model.state_dict() - new_state_dict= {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})" .format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - # ckptname = checkpoint_path.split(os.sep)[-1] - # newest_step = int(ckptname.split(".")[0].split("_")[1]) - # val_steps = 2000 - # last_ckptname = checkpoint_path.replace(str(newest_step), str(newest_step - val_steps*3)) - # if newest_step >= val_steps*3: - # os.system(f"rm {last_ckptname}") - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10,2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return 
torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams =HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() - diff --git a/spaces/daddyjin/TalkingFaceGeneration/FONT/script_test_liujin.sh b/spaces/daddyjin/TalkingFaceGeneration/FONT/script_test_liujin.sh deleted file mode 100644 index 5beff8b035923f42ffddb2205d3fe3b540f73871..0000000000000000000000000000000000000000 --- a/spaces/daddyjin/TalkingFaceGeneration/FONT/script_test_liujin.sh +++ /dev/null @@ -1,9 +0,0 @@ -CUDA_VISIBLE_DEVICES=1 \ -python demo.py \ ---source_image "/data/liujin/dataset/LRW/lipread_frames/ABOUT/test/ABOUT_00014/000000.jpg" \ ---in_file "/data/liujin/EAMM-main/test/audio/icme_intro.mp3" \ ---pose_file "/data/liujin/dataset/LRW/lipread_3DDFA_pose/ABOUT/test/ABOUT_00014/000000.npy" \ ---pose_given "/data/liujin/dataset/preprocess/LRW/data_file/test_pose_long_3ddfa_hdtf.npy" \ ---pose_long TRUE - - diff --git a/spaces/datasciencedojo/Hand-Keypoint-Detection-Realtime/README.md b/spaces/datasciencedojo/Hand-Keypoint-Detection-Realtime/README.md deleted file mode 100644 index 52b8416b150e7b811f09b81ee26159db1533495b..0000000000000000000000000000000000000000 --- a/spaces/datasciencedojo/Hand-Keypoint-Detection-Realtime/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hand Keypoint Detection Realtime -emoji: 🔥 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py deleted file mode 100644 index 7eb82228a9928bac325f641d45346364c61e8092..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/PIL/PixarImagePlugin.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# PIXAR raster support for PIL -# -# history: -# 97-01-29 fl Created -# -# notes: -# This is incomplete; it is based on a few samples created with -# Photoshop 2.5 and 3.0, and a summary description provided by -# Greg Coats . Hopefully, "L" and -# "RGBA" support will be added in future versions. -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1997. -# -# See the README file for information on usage and redistribution. -# - -from . 
import Image, ImageFile -from ._binary import i16le as i16 - -# -# helpers - - -def _accept(prefix): - return prefix[:4] == b"\200\350\000\000" - - -## -# Image plugin for PIXAR raster images. - - -class PixarImageFile(ImageFile.ImageFile): - format = "PIXAR" - format_description = "PIXAR raster image" - - def _open(self): - # assuming a 4-byte magic label - s = self.fp.read(4) - if not _accept(s): - msg = "not a PIXAR file" - raise SyntaxError(msg) - - # read rest of header - s = s + self.fp.read(508) - - self._size = i16(s, 418), i16(s, 416) - - # get channel/depth descriptions - mode = i16(s, 424), i16(s, 426) - - if mode == (14, 2): - self.mode = "RGB" - # FIXME: to be continued... - - # create tile descriptor (assuming "dumped") - self.tile = [("raw", (0, 0) + self.size, 1024, (self.mode, 0, 1))] - - -# -# -------------------------------------------------------------------- - -Image.register_open(PixarImageFile.format, PixarImageFile, _accept) - -Image.register_extension(PixarImageFile.format, ".pxr") diff --git a/spaces/dcq/freegpt-webui/client/css/field.css b/spaces/dcq/freegpt-webui/client/css/field.css deleted file mode 100644 index 914425a75d9e62e6428bdb8f5de2c66c91f10d33..0000000000000000000000000000000000000000 --- a/spaces/dcq/freegpt-webui/client/css/field.css +++ /dev/null @@ -1,11 +0,0 @@ -.field { - display: flex; - align-items: center; - padding: 4px; -} - -@media screen and (max-width: 990px) { - .field { - flex-wrap: nowrap; - } -} diff --git a/spaces/ddstua/Enhance_Low_Light_Image/README.md b/spaces/ddstua/Enhance_Low_Light_Image/README.md deleted file mode 100644 index 81f26a5775f791b42f790557a96c9f0e5916b6db..0000000000000000000000000000000000000000 --- a/spaces/ddstua/Enhance_Low_Light_Image/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Enhance Low Light Image with MIRNet -emoji: 🎇 -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -duplicated_from: keras-io/Enhance_Low_Light_Image ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/training/infer_demo.py b/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/training/infer_demo.py deleted file mode 100644 index 7d1f4784898dbfeb69affefb6f624711adc8cb42..0000000000000000000000000000000000000000 --- a/spaces/deeplearning/audioldm-text-to-audio-generation/audioldm/clap/training/infer_demo.py +++ /dev/null @@ -1,105 +0,0 @@ -import sys - -import os -import torch -import librosa -from open_clip import create_model -from training.data import get_audio_features -from training.data import int16_to_float32, float32_to_int16 -from transformers import RobertaTokenizer - -tokenize = RobertaTokenizer.from_pretrained("roberta-base") - - -def tokenizer(text): - result = tokenize( - text, - padding="max_length", - truncation=True, - max_length=77, - return_tensors="pt", - ) - return {k: v.squeeze(0) for k, v in result.items()} - - -PRETRAINED_PATH = "/mnt/fast/nobackup/users/hl01486/projects/contrastive_pretraining/CLAP/assets/checkpoints/epoch_top_0_audioset_no_fusion.pt" -WAVE_48k_PATH = "/mnt/fast/nobackup/users/hl01486/projects/contrastive_pretraining/CLAP/assets/audio/machine.wav" - - -def infer_text(): - device = "cuda:0" if torch.cuda.is_available() else "cpu" - precision = "fp32" - amodel = "HTSAT-tiny" # or 'PANN-14' - tmodel = "roberta" # the best text encoder in our training - enable_fusion = False # False if you do not want to 
use the fusion model - fusion_type = "aff_2d" - pretrained = PRETRAINED_PATH - - model, model_cfg = create_model( - amodel, - tmodel, - pretrained, - precision=precision, - device=device, - enable_fusion=enable_fusion, - fusion_type=fusion_type, - ) - # load the text, can be a list (i.e. batch size) - text_data = ["I love the contrastive learning", "I love the pretrain model"] - # tokenize for roberta, if you want to tokenize for another text encoder, please refer to data.py#L43-90 - text_data = tokenizer(text_data) - - text_embed = model.get_text_embedding(text_data) - print(text_embed.size()) - - -def infer_audio(): - - device = "cuda:0" if torch.cuda.is_available() else "cpu" - precision = "fp32" - amodel = "HTSAT-tiny" # or 'PANN-14' - tmodel = "roberta" # the best text encoder in our training - enable_fusion = False # False if you do not want to use the fusion model - fusion_type = "aff_2d" - pretrained = PRETRAINED_PATH - - model, model_cfg = create_model( - amodel, - tmodel, - pretrained, - precision=precision, - device=device, - enable_fusion=enable_fusion, - fusion_type=fusion_type, - ) - - # load the waveform of the shape (T,), should resample to 48000 - audio_waveform, sr = librosa.load(WAVE_48k_PATH, sr=48000) - # quantize - audio_waveform = int16_to_float32(float32_to_int16(audio_waveform)) - audio_waveform = torch.from_numpy(audio_waveform).float() - audio_dict = {} - - # the 'fusion' truncate mode can be changed to 'rand_trunc' if run in unfusion mode - import ipdb - - ipdb.set_trace() - audio_dict = get_audio_features( - audio_dict, - audio_waveform, - 480000, - data_truncating="fusion", - data_filling="repeatpad", - audio_cfg=model_cfg["audio_cfg"], - ) - # can send a list to the model, to process many audio tracks in one time (i.e. 
batch size) - audio_embed = model.get_audio_embedding([audio_dict]) - print(audio_embed.size()) - import ipdb - - ipdb.set_trace() - - -if __name__ == "__main__": - infer_text() - infer_audio() diff --git a/spaces/deepwisdom/MetaGPT/metagpt/web/app.py b/spaces/deepwisdom/MetaGPT/metagpt/web/app.py deleted file mode 100644 index 5df702fbb9e996def8a93a5a05e2fa938cd2f7af..0000000000000000000000000000000000000000 --- a/spaces/deepwisdom/MetaGPT/metagpt/web/app.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- -import asyncio -import urllib.parse -from datetime import datetime -import uuid -from enum import Enum - -from fastapi import FastAPI, Request, HTTPException -from fastapi.responses import StreamingResponse, RedirectResponse -from fastapi.staticfiles import StaticFiles -import fire -from pydantic import BaseModel, Field -import uvicorn - -from typing import Any, Optional - -from metagpt import Message -from metagpt.actions.action import Action -from metagpt.actions.action_output import ActionOutput -from metagpt.config import CONFIG - -from metagpt.roles.software_company import RoleRun, SoftwareCompany - - -class QueryAnswerType(Enum): - Query = "Q" - Answer = "A" - - -class SentenceType(Enum): - TEXT = "text" - HIHT = "hint" - ACTION = "action" - - -class MessageStatus(Enum): - COMPLETE = "complete" - - -class SentenceValue(BaseModel): - answer: str - - -class Sentence(BaseModel): - type: str - id: Optional[str] = None - value: SentenceValue - is_finished: Optional[bool] = None - - -class Sentences(BaseModel): - id: Optional[str] = None - action: Optional[str] = None - role: Optional[str] = None - skill: Optional[str] = None - description: Optional[str] = None - timestamp: str = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f%z") - status: str - contents: list[dict] - - -class NewMsg(BaseModel): - """Chat with MetaGPT""" - - query: str = Field(description="Problem description") - config: dict[str, Any] = Field(description="Configuration information") - - -class ErrorInfo(BaseModel): - error: str = None - traceback: str = None - - -class ThinkActStep(BaseModel): - id: str - status: str - title: str - timestamp: str - description: str - content: Sentence = None - - -class ThinkActPrompt(BaseModel): - message_id: int = None - timestamp: str = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f%z") - step: ThinkActStep = None - skill: Optional[str] = None - role: Optional[str] = None - - def update_think(self, tc_id, action: Action): - self.step = ThinkActStep( - id=str(tc_id), - status="running", - title=action.desc, - timestamp=datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f%z"), - description=action.desc, - ) - - def update_act(self, message: ActionOutput): - self.step.status = "finish" - self.step.content = Sentence( - type="text", - id=ThinkActPrompt.guid32(), - value=SentenceValue(answer=message.content), - is_finished=True, - ) - - @staticmethod - def guid32(): - return str(uuid.uuid4()).replace("-", "")[0:32] - - @property - def prompt(self): - v = self.json(exclude_unset=True) - return urllib.parse.quote(v) - - -class MessageJsonModel(BaseModel): - steps: list[Sentences] - qa_type: str - created_at: datetime = datetime.now() - query_time: datetime = datetime.now() - answer_time: datetime = datetime.now() - score: Optional[int] = None - feedback: Optional[str] = None - - def add_think_act(self, think_act_prompt: ThinkActPrompt): - s = Sentences( - action=think_act_prompt.step.title, - skill=think_act_prompt.skill, - description=think_act_prompt.step.description, 
- timestamp=think_act_prompt.timestamp, - status=think_act_prompt.step.status, - contents=[think_act_prompt.step.content.dict()], - ) - self.steps.append(s) - - @property - def prompt(self): - v = self.json(exclude_unset=True) - return urllib.parse.quote(v) - - -async def create_message(req_model: NewMsg, request: Request): - """ - Session message stream - """ - config = {k.upper(): v for k, v in req_model.config.items()} - CONFIG.set_context(config) - role = SoftwareCompany() - role.recv(message=Message(content=req_model.query)) - answer = MessageJsonModel( - steps=[ - Sentences( - contents=[ - Sentence(type=SentenceType.TEXT.value, value=SentenceValue(answer=req_model.query), is_finished=True) - ], - status=MessageStatus.COMPLETE.value, - ) - ], - qa_type=QueryAnswerType.Answer.value, - ) - - tc_id = 0 - - while True: - tc_id += 1 - if request and await request.is_disconnected(): - return - think_result: RoleRun = await role.think() - if not think_result: # End of conversion - break - think_act_prompt = ThinkActPrompt(role=think_result.role.profile) - think_act_prompt.update_think(tc_id, think_result) - yield think_act_prompt.prompt + "\n\n" - act_result = await role.act() - think_act_prompt.update_act(act_result) - yield think_act_prompt.prompt + "\n\n" - answer.add_think_act(think_act_prompt) - yield answer.prompt + "\n\n" # Notify the front-end that the message is complete. - - -class ChatHandler: - @staticmethod - async def create_message(req_model: NewMsg, request: Request): - """Message stream, using SSE.""" - event = create_message(req_model, request) - headers = {"Cache-Control": "no-cache", "Connection": "keep-alive"} - return StreamingResponse(event, headers=headers, media_type="text/event-stream") - - -app = FastAPI() - -app.mount( - "/static", - StaticFiles(directory="./metagpt/static/", check_dir=True), - name="static", -) -app.add_api_route( - "/api/messages", - endpoint=ChatHandler.create_message, - methods=["post"], - summary="Session message sending (streaming response)", -) - - -@app.get("/{catch_all:path}") -async def catch_all(request: Request): - if request.url.path == "/": - return RedirectResponse(url="/static/index.html") - if request.url.path.startswith("/api"): - raise HTTPException(status_code=404) - - new_path = f"/static{request.url.path}" - return RedirectResponse(url=new_path) - - -def main(): - uvicorn.run(app="__main__:app", host="0.0.0.0", port=7860) - - -if __name__ == "__main__": - fire.Fire(main) diff --git a/spaces/diacanFperku/AutoGPT/Adobe Photoshop CC 2019 V20.0.0 Multilingual Crack [TechTools] Downloadl [Extra Quality].md b/spaces/diacanFperku/AutoGPT/Adobe Photoshop CC 2019 V20.0.0 Multilingual Crack [TechTools] Downloadl [Extra Quality].md deleted file mode 100644 index 2b0c952a97b8cb95ad28ab9db9af7834bac985e6..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Adobe Photoshop CC 2019 V20.0.0 Multilingual Crack [TechTools] Downloadl [Extra Quality].md +++ /dev/null @@ -1,14 +0,0 @@ -

Adobe Photoshop CC 2019 V20.0.0 Multilingual Crack [TechTools] Downloadl


DOWNLOAD ✯✯✯ https://gohhs.com/2uFVvO



-
-April 10, 2019 - Adobe Illustrator CC 2019 (HKLM-x32\\. . c:\\users\\yaokeroa\\downloads\\adobe photoshop lightroom cc 6.5 .1 multilingual + crack\\atomix virtual .Adobe Photoshop Lightroom CC 6.5.1 Multi\\Rus. . -Adobe Photoshop Lightroom CC 6.5.1 Multi\\Rus. . -Adobe CS6 Master Collection Update 2 + Crack. -Adobe Photoshop CS6 Extended 2013 PC. -Screenshots. -Screen 1. Adobe Photoshop CC (v2017.1 Update 1) | Repack by Diakov. -Adobe Photoshop Lightroom CC 2018.1.1 - . -Download Adobe Photoshop Lightroom CC 6.1. -Adobe Photoshop CS6 Master Collection Update 2 + Crack. 8a78ff9644
-
-
-

diff --git a/spaces/diacanFperku/AutoGPT/Ankhiyon Se Goli Maare In Hindi Torrent Download 720p.md b/spaces/diacanFperku/AutoGPT/Ankhiyon Se Goli Maare In Hindi Torrent Download 720p.md deleted file mode 100644 index c35f762b0f59e7ca8a1d73e741f18ff0816b6431..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Ankhiyon Se Goli Maare In Hindi Torrent Download 720p.md +++ /dev/null @@ -1,87 +0,0 @@ -
-

Ankhiyon Se Goli Maare in hindi torrent download 720p

-

If you are a fan of Bollywood comedy movies, you might have heard of Ankhiyon Se Goli Maare, a 2002 film starring Govinda and Raveena Tandon. The film is a hilarious story of a poor man who falls in love with a rich girl and faces many obstacles from her father and his rival. The film is full of funny scenes, songs, and dialogues that will make you laugh out loud.

-

But what if you want to watch Ankhiyon Se Goli Maare in high quality and with subtitles? You might not find it on any streaming platform or DVD store. That's why you need to download Ankhiyon Se Goli Maare in hindi torrent download 720p. This is a file that contains the movie in HD quality and with English subtitles. You can download it from various websites and enjoy the movie on your device.

-

Ankhiyon Se Goli Maare in hindi torrent download 720p


Download Ziphttps://gohhs.com/2uFVDj



-

How to download Ankhiyon Se Goli Maare in hindi torrent download 720p

-

Downloading Ankhiyon Se Goli Maare in hindi torrent download 720p is easy and simple. You just need to follow these steps:

-
    -
  1. Go to one of the websites that offer Ankhiyon Se Goli Maare in hindi torrent download 720p. Some of them are Peatix, Fabianozan, SoundCloud, etc.
  2. -
  3. Search for Ankhiyon Se Goli Maare in hindi torrent download 720p using the search box or browse through the categories.
  4. -
  5. Select the file that suits your preferences and needs. Check the quality, size, format, and subtitles of the file before downloading.
  6. -
  7. Click on the download button and follow the instructions to complete the download process. You might need to use a torrent client software such as BitTorrent, uTorrent, or Vuze to download the file.
  8. -
  9. Enjoy watching Ankhiyon Se Goli Maare in hindi torrent download 720p on your computer, tablet, smartphone, or TV.
  10. -
-

Before you download Ankhiyon Se Goli Maare in hindi torrent download 720p, make sure you have a good internet connection and enough storage space on your device. Also, make sure you respect the copyright and terms of use of the source website.

-

Why you should watch Ankhiyon Se Goli Maare in hindi torrent download 720p

-

There are many reasons why you should watch Ankhiyon Se Goli Maare in hindi torrent download 720p. Here are some of them:

-
    -
  • You will enjoy a classic Bollywood comedy movie that will make you laugh and entertain you.
  • -
  • You will see Govinda and Raveena Tandon in their best performances as a comic pair.
  • -
  • You will listen to some catchy and popular songs such as "Ankhiyon Se Goli Maare", "Ladka Mud Mud Ke Maare", "O Chhori Gori Gori", etc.
  • -
  • You will learn some funny and witty dialogues that you can use in your conversations.
  • -
  • You will watch the movie in HD quality and with English subtitles that will enhance your viewing experience.
  • -
-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a movie that you should not miss. It is a movie that will make you happy and relaxed. It is a movie that will make you a fan of Bollywood comedy movies.

-

Conclusion

-

Ankhiyon Se Goli Maare is a 2002 Bollywood comedy movie that stars Govinda and Raveena Tandon. The movie is a hilarious story of a poor man who falls in love with a rich girl and faces many obstacles from her father and his rival. The movie is full of funny scenes, songs, and dialogues that will make you laugh out loud.

-

If you want to watch Ankhiyon Se Goli Maare in high quality and with subtitles, you need to download Ankhiyon Se Goli Maare in hindi torrent download 720p. This is a file that contains the movie in HD quality and with English subtitles. You can download it from various websites and enjoy the movie on your device.

-

Downloading Ankhiyon Se Goli Maare in hindi torrent download 720p is easy and simple. You just need to follow some steps and use a torrent client software. Before you download the file, make sure you have a good internet connection and enough storage space on your device. Also, make sure you respect the copyright and terms of use of the source website.

-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a movie that you should watch. It is a movie that will entertain you and make you laugh. It is a movie that will make you a fan of Bollywood comedy movies.

-

-

Download Ankhiyon Se Goli Maare in hindi torrent download 720p today and enjoy watching one of the best Bollywood comedy movies ever made.

-

What you will enjoy from Ankhiyon Se Goli Maare in hindi torrent download 720p

-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a movie that will give you a lot of enjoyment and entertainment. By watching this movie, you will:

-
    -
  • Laugh at the hilarious comedy of Govinda and Raveena Tandon, who play a poor man and a rich girl who fall in love and face many challenges from their families and enemies.
  • -
  • Admire the chemistry and romance of Govinda and Raveena Tandon, who share some cute and funny moments together.
  • -
  • Appreciate the acting and dialogues of Kader Khan, Johnny Lever, Asrani, Shakti Kapoor, and other supporting actors, who play various roles such as a rich father, a rival gangster, a loyal friend, a funny servant, etc.
  • -
  • Enjoy the music and dance of Ankhiyon Se Goli Maare, which has some catchy and popular songs such as "Ankhiyon Se Goli Maare", "Ladka Mud Mud Ke Maare", "O Chhori Gori Gori", etc.
  • -
  • Experience the thrill and action of Ankhiyon Se Goli Maare, which has some exciting and funny scenes such as a car chase, a fight scene, a kidnapping scene, etc.
  • -
-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a movie that will make you laugh, smile, and enjoy. It is a movie that will make you forget your worries and stress. It is a movie that will make you happy.

-
How to watch Ankhiyon Se Goli Maare in hindi torrent download 720p
-

Watching Ankhiyon Se Goli Maare in hindi torrent download 720p is easy and convenient. You can watch it on any device that supports video playback. You can watch it on your computer, tablet, smartphone, or TV. You can watch it alone or with your friends and family. You can watch it anytime and anywhere you want.

-

To watch Ankhiyon Se Goli Maare in hindi torrent download 720p, you should:

-
    -
  1. Download Ankhiyon Se Goli Maare in hindi torrent download 720p from one of the websites that offer it.
  2. -
  3. Open the file using a video player software such as VLC Media Player, Windows Media Player, or MX Player.
  4. -
  5. Select the language and subtitle options that you prefer.
  6. -
  7. Enjoy watching Ankhiyon Se Goli Maare in hindi torrent download 720p.
  8. -
-

Before you watch Ankhiyon Se Goli Maare in hindi torrent download 720p, make sure you have a good device and software that can play the file smoothly and clearly. Also, make sure you have enough time and space to watch the movie without any interruptions or distractions.

-What are the benefits of Ankhiyon Se Goli Maare in hindi torrent download 720p -

There are many benefits of Ankhiyon Se Goli Maare in hindi torrent download 720p. Some of them are:

-
    -
  • You will save money and time by downloading Ankhiyon Se Goli Maare in hindi torrent download 720p instead of buying or renting a DVD or a streaming subscription.
  • -
  • You will have more control and flexibility over your viewing experience by downloading Ankhiyon Se Goli Maare in hindi torrent download 720p. You can watch it whenever and wherever you want, pause and resume it, rewind and fast-forward it, adjust the volume and brightness, etc.
  • -
  • You will have more options and choices by downloading Ankhiyon Se Goli Maare in hindi torrent download 720p. You can choose the file that suits your preferences and needs, such as the quality, size, format, and subtitles.
  • -
  • You will have more security and privacy by downloading Ankhiyon Se Goli Maare in hindi torrent download 720p. You can avoid viruses, malware, spyware, ads, pop-ups, and other unwanted or harmful elements that might come with online streaming or downloading.
  • -
  • You will have more satisfaction and enjoyment by downloading Ankhiyon Se Goli Maare in hindi torrent download 720p. You can watch it in HD quality and with English subtitles that will enhance your viewing experience. You can also share it with your friends and family and have fun together.
  • -
-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a file that will give you many benefits. It is a file that will make you happy and satisfied. It is a file that will make you a smart and savvy viewer.

-Tips for downloading Ankhiyon Se Goli Maare in hindi torrent download 720p -

Downloading Ankhiyon Se Goli Maare in hindi torrent download 720p is easy and simple, but you should also follow some tips to make it more effective and safe. Here are some tips for downloading Ankhiyon Se Goli Maare in hindi torrent download 720p:

-
    -
  • Use a reliable and reputable website that offers Ankhiyon Se Goli Maare in hindi torrent download 720p. You can check the reviews, ratings, comments, feedback, and reputation of the website before downloading from it.
  • -
  • Use a secure and stable internet connection that can support the download speed and bandwidth of Ankhiyon Se Goli Maare in hindi torrent download 720p. You can also use a VPN or proxy service to protect your identity and location while downloading.
  • -
  • Use a compatible and updated device and software that can play Ankhiyon Se Goli Maare in hindi torrent download 720p smoothly and clearly. You can also use an antivirus or firewall software to scan and protect your device from any potential threats or damages.
  • -
  • Use a legal and ethical way of downloading Ankhiyon Se Goli Maare in hindi torrent download 720p. You should respect the rights and interests of the creators, owners, distributors, and users of Ankhiyon Se Goli Maare. You should also follow the rules and regulations of your country or region regarding downloading torrents.
  • -
  • Use a responsible and respectful way of downloading Ankhiyon Se Goli Maare in hindi torrent download 720p. You should not download or share any files that are illegal, harmful, offensive, or inappropriate. You should also not download or share any files that might affect the quality or availability of Ankhiyon Se Goli Maare.
  • -
-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a file that you should download with care and caution. You should follow these tips to make your downloading experience more effective and safe. You should also enjoy your downloading experience with gratitude and appreciation.

-Summary -

Ankhiyon Se Goli Maare in hindi torrent download 720p is a file that contains the 2002 Bollywood comedy movie Ankhiyon Se Goli Maare in HD quality and with English subtitles. The movie stars Govinda and Raveena Tandon as a poor man and a rich girl who fall in love and face many obstacles from her father and his rival. The movie is full of funny scenes, songs, and dialogues that will make you laugh out loud.

-

If you want to watch Ankhiyon Se Goli Maare in high quality and with subtitles, you need to download Ankhiyon Se Goli Maare in hindi torrent download 720p. You can download it from various websites that offer it for free or for a reasonable price. You can enjoy the movie on your device of choice, such as your computer, tablet, smartphone, or TV.

-

Downloading Ankhiyon Se Goli Maare in hindi torrent download 720p is easy and simple. You just need to follow some steps and use a torrent client software to download the file. You also need to have a good internet connection and enough storage space on your device. You also need to respect the copyright and terms of use of the source website.

-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a file that will give you many benefits. You will save money and time by downloading it instead of buying or renting a DVD or a streaming subscription. You will have more control and flexibility over your viewing experience by downloading it. You will have more options and choices by downloading it. You will have more security and privacy by downloading it. You will have more satisfaction and enjoyment by downloading it.

-

Ankhiyon Se Goli Maare in hindi torrent download 720p is a file that you should not miss. It is a file that will make you happy and relaxed. It is a file that will make you a fan of Bollywood comedy movies.

-

Download Ankhiyon Se Goli Maare in hindi torrent download 720p today and enjoy watching one of the best Bollywood comedy movies ever made.

-
-
\ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/EA Games Generic Multi Keygen V214-FFF _VERIFIED_.md b/spaces/diacanFperku/AutoGPT/EA Games Generic Multi Keygen V214-FFF _VERIFIED_.md deleted file mode 100644 index b886a8a767827abb3e7963966c30baf345724099..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/EA Games Generic Multi Keygen V214-FFF _VERIFIED_.md +++ /dev/null @@ -1,6 +0,0 @@ -

EA Games Generic Multi Keygen v214-FFF


Download Zip 🆗 https://gohhs.com/2uFUce



- -in just 2 easy steps learn how to remove ea games generic multi keygen 214 - by fff.exe file, get full information and solutions for ea games generic multi keygen ... 1fdad05405
-
-
-

diff --git a/spaces/diaoren/OpenSetObstacleDetection/opendet2/engine/defaults.py b/spaces/diaoren/OpenSetObstacleDetection/opendet2/engine/defaults.py deleted file mode 100644 index 8a83ccb75be7bbd456cca839a291a2d242a22165..0000000000000000000000000000000000000000 --- a/spaces/diaoren/OpenSetObstacleDetection/opendet2/engine/defaults.py +++ /dev/null @@ -1,456 +0,0 @@ -import logging -import os -import weakref -from collections import OrderedDict -from typing import Dict - -import torch -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import CfgNode -from detectron2.data import MetadataCatalog -from detectron2.engine import (AMPTrainer, SimpleTrainer, - TrainerBase, create_ddp_model, hooks, create_ddp_model, default_writers) -from detectron2.evaluation import (DatasetEvaluator, DatasetEvaluators, - inference_on_dataset, print_csv_format, - verify_results) -from detectron2.modeling import GeneralizedRCNNWithTTA, build_model -from detectron2.solver import build_lr_scheduler -from detectron2.utils import comm -from detectron2.utils.logger import setup_logger -from fvcore.nn.precise_bn import get_bn_modules - -from ..data import build_detection_test_loader, build_detection_train_loader -from ..evaluation import PascalVOCDetectionEvaluator -from ..solver import build_optimizer - - -class OpenDetTrainer(TrainerBase): - """ - A trainer with default training logic. It does the following: - - 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader - defined by the given config. Create a LR scheduler defined by the config. - 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when - `resume_or_load` is called. - 3. Register a few common hooks defined by the config. - - It is created to simplify the **standard model training workflow** and reduce code boilerplate - for users who only need the standard training workflow, with standard features. - It means this class makes *many assumptions* about your training logic that - may easily become invalid in a new research. In fact, any assumptions beyond those made in the - :class:`SimpleTrainer` are too much for research. - - The code of this class has been annotated about restrictive assumptions it makes. - When they do not work for you, you're encouraged to: - - 1. Overwrite methods of this class, OR: - 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and - nothing else. You can then add your own hooks if needed. OR: - 3. Write your own training loop similar to `tools/plain_train_net.py`. - - See the :doc:`/tutorials/training` tutorials for more details. - - Note that the behavior of this class, like other functions/classes in - this file, is not stable, since it is meant to represent the "common default behavior". - It is only guaranteed to work well with the standard models and training workflow in detectron2. - To obtain more stable behavior, write your own training logic with other public APIs. - - Examples: - :: - trainer = DefaultTrainer(cfg) - trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS - trainer.train() - - Attributes: - scheduler: - checkpointer (DetectionCheckpointer): - cfg (CfgNode): - """ - - def __init__(self, cfg): - """ - Args: - cfg (CfgNode): - """ - super().__init__() - logger = logging.getLogger("detectron2") - # setup_logger is not called for d2 - if not logger.isEnabledFor(logging.INFO): - setup_logger() - cfg = OpenDetTrainer.auto_scale_workers(cfg, comm.get_world_size()) - # Assume these objects must be constructed in this order. 
- # 建立模型 - model = self.build_model(cfg) - # 优化器 - optimizer = self.build_optimizer(cfg, model) - # 创建数据接口 - data_loader = self.build_train_loader(cfg) - # 创建分布式训练模型 - model = create_ddp_model( - model, broadcast_buffers=False, find_unused_parameters=True) - # model = create_ddp_model(model, broadcast_buffers=False) - #训练器 - self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)( - model, data_loader, optimizer - ) - # 学习率衰减 - self.scheduler = self.build_lr_scheduler(cfg, optimizer) - # 设置检查点 - self.checkpointer = DetectionCheckpointer( - # Assume you want to save checkpoints together with logs/statistics - model, - cfg.OUTPUT_DIR, - trainer=weakref.proxy(self), - ) - self.start_iter = 0 - self.max_iter = cfg.SOLVER.MAX_ITER - self.cfg = cfg - # 注册一些hook函数 - self.register_hooks(self.build_hooks()) - - # 继续训练/加载权重 - def resume_or_load(self, resume=True): - """ - If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by - a `last_checkpoint` file), resume from the file. Resuming means loading all - available states (eg. optimizer and scheduler) and update iteration counter - from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used. - - Otherwise, this is considered as an independent training. The method will load model - weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start - from iteration 0. - - Args: - resume (bool): whether to do resume or not - """ - self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume) - if resume and self.checkpointer.has_checkpoint(): - # The checkpoint stores the training iteration that just finished, thus we start - # at the next iteration - self.start_iter = self.iter + 1 - - def build_hooks(self): - """ - Build a list of default hooks, including timing, evaluation, - checkpointing, lr scheduling, precise BN, writing events. - - Returns: - list[HookBase]: - """ - cfg = self.cfg.clone() - cfg.defrost() - cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN - - ret = [ - hooks.IterationTimer(), - hooks.LRScheduler(), - hooks.PreciseBN( - # Run at the same freq as (but before) evaluation. - cfg.TEST.EVAL_PERIOD, - self.model, - # Build a new data loader to not affect training - self.build_train_loader(cfg), - cfg.TEST.PRECISE_BN.NUM_ITER, - ) - if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) - else None, - ] - - # Do PreciseBN before checkpointer, because it updates the model and need to - # be saved by checkpointer. - # This is not always the best: if checkpointing has a different frequency, - # some checkpoints may have more precise statistics than others. - if comm.is_main_process(): - ret.append(hooks.PeriodicCheckpointer( - self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD)) - - def test_and_save_results(): - self._last_eval_results = self.test(self.cfg, self.model) - return self._last_eval_results - - # Do evaluation after checkpointer, because then if it fails, - # we can use the saved checkpoint to debug. - ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) - - if comm.is_main_process(): - # Here the default print/log frequency of each writer is used. - # run writers in the end, so that evaluation metrics are written - ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) - return ret - - def build_writers(self): - """ - Build a list of writers to be used using :func:`default_writers()`. - If you'd like a different list of writers, you can overwrite it in - your trainer. 
- - Returns: - list[EventWriter]: a list of :class:`EventWriter` objects. - """ - return default_writers(self.cfg.OUTPUT_DIR, self.max_iter) - - # 进行训练 - def train(self): - """ - Run training. - - Returns: - OrderedDict of results, if evaluation is enabled. Otherwise None. - """ - super().train(self.start_iter, self.max_iter) - if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process(): - assert hasattr( - self, "_last_eval_results" - ), "No evaluation results obtained during training!" - verify_results(self.cfg, self._last_eval_results) - return self._last_eval_results - - def run_step(self): - self._trainer.iter = self.iter - self._trainer.run_step() - - @classmethod - def build_model(cls, cfg): - """ - Returns: - torch.nn.Module: - - It now calls :func:`detectron2.modeling.build_model`. - Overwrite it if you'd like a different model. - """ - model = build_model(cfg) - logger = logging.getLogger(__name__) - logger.info("Model:\n{}".format(model)) - return model - - @classmethod - def build_optimizer(cls, cfg, model): - """ - Returns: - torch.optim.Optimizer: - - It now calls :func:`detectron2.solver.build_optimizer`. - Overwrite it if you'd like a different optimizer. - """ - return build_optimizer(cfg, model) - - @classmethod - def build_lr_scheduler(cls, cfg, optimizer): - """ - It now calls :func:`detectron2.solver.build_lr_scheduler`. - Overwrite it if you'd like a different scheduler. - """ - return build_lr_scheduler(cfg, optimizer) - - @classmethod - def build_train_loader(cls, cfg): - """ - Returns: - iterable - - It now calls :func:`detectron2.data.build_detection_train_loader`. - Overwrite it if you'd like a different data loader. - """ - return build_detection_train_loader(cfg) - - @classmethod - def build_test_loader(cls, cfg, dataset_name): - """ - Returns: - iterable - - It now calls :func:`detectron2.data.build_detection_test_loader`. - Overwrite it if you'd like a different data loader. - """ - return build_detection_test_loader(cfg, dataset_name) - - #和detectron2源码中DefaultTrainer相比多实现了此方法 - @classmethod - def build_evaluator(cls, cfg, dataset_name, output_folder=None): - if output_folder is None: - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") - evaluator_list = [] - # 获取evaluator类型 - evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type - - if evaluator_type == "pascal_voc": - # 返回自定义evaluator - return PascalVOCDetectionEvaluator(dataset_name, cfg) - - if len(evaluator_list) == 0: - raise NotImplementedError( - "no Evaluator for the dataset {} with the type {}".format( - dataset_name, evaluator_type - ) - ) - elif len(evaluator_list) == 1: - return evaluator_list[0] - return DatasetEvaluators(evaluator_list) - - @classmethod - def test_with_TTA(cls, cfg, model): - logger = logging.getLogger("detectron2.trainer") - # In the end of training, run an evaluation with TTA - # Only support some R-CNN models. - logger.info("Running inference with test-time augmentation ...") - model = GeneralizedRCNNWithTTA(cfg, model) - evaluators = [ - cls.build_evaluator( - cfg, name, output_folder=os.path.join( - cfg.OUTPUT_DIR, "inference_TTA") - ) - for name in cfg.DATASETS.TEST - ] - res = cls.test(cfg, model, evaluators) - res = OrderedDict({k + "_TTA": v for k, v in res.items()}) - return res - - @classmethod - def test(cls, cfg, model, evaluators=None): - """ - Args: - cfg (CfgNode): - model (nn.Module): - evaluators (list[DatasetEvaluator] or None): if None, will call - :meth:`build_evaluator`. 
Otherwise, must have the same length as - ``cfg.DATASETS.TEST``. - - Returns: - dict: a dict of result metrics - """ - logger = logging.getLogger(__name__) - if isinstance(evaluators, DatasetEvaluator): - evaluators = [evaluators] - if evaluators is not None: - assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( - len(cfg.DATASETS.TEST), len(evaluators) - ) - - # 结果的字典 - results = OrderedDict() - # 遍历所所有测试集 - for idx, dataset_name in enumerate(cfg.DATASETS.TEST): - # 建立当前测试集的dataloader - data_loader = cls.build_test_loader(cfg, dataset_name) - # When evaluators are passed in as arguments, - # implicitly assume that evaluators can be created before data_loader. - if evaluators is not None: - evaluator = evaluators[idx] - else: - try: - # 创建evaluator - evaluator = cls.build_evaluator(cfg, dataset_name) - except NotImplementedError: - logger.warn( - "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " - "or implement its `build_evaluator` method." - ) - results[dataset_name] = {} - continue - # 在测试集上测试并返回测试结果 - # {'mAP': , 'WI': , 'AOSE': , 'AP@K': , 'P@K': , 'R@K': , 'AP@U': , 'P@U': , 'R@U': } - results_i = inference_on_dataset(model, data_loader, evaluator) - results[dataset_name] = results_i - if comm.is_main_process(): - assert isinstance( - results_i, dict - ), "Evaluator must return a dict on the main process. Got {} instead.".format( - results_i - ) - logger.info( - "Evaluation results for {} in csv format:".format(dataset_name)) - print_csv_format(results_i) - - if len(results) == 1: - results = list(results.values())[0] - return results - - @staticmethod - def auto_scale_workers(cfg, num_workers: int): - """ - When the config is defined for certain number of workers (according to - ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of - workers currently in use, returns a new cfg where the total batch size - is scaled so that the per-GPU batch size stays the same as the - original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``. - - Other config options are also scaled accordingly: - * training steps and warmup steps are scaled inverse proportionally. - * learning rate are scaled proportionally, following :paper:`ImageNet in 1h`. - - For example, with the original config like the following: - - .. code-block:: yaml - - IMS_PER_BATCH: 16 - BASE_LR: 0.1 - REFERENCE_WORLD_SIZE: 8 - MAX_ITER: 5000 - STEPS: (4000,) - CHECKPOINT_PERIOD: 1000 - - When this config is used on 16 GPUs instead of the reference number 8, - calling this method will return a new config with: - - .. code-block:: yaml - - IMS_PER_BATCH: 32 - BASE_LR: 0.2 - REFERENCE_WORLD_SIZE: 16 - MAX_ITER: 2500 - STEPS: (2000,) - CHECKPOINT_PERIOD: 500 - - Note that both the original config and this new config can be trained on 16 GPUs. - It's up to user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``). - - Returns: - CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``. - """ - old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE - if old_world_size == 0 or old_world_size == num_workers: - return cfg - cfg = cfg.clone() - frozen = cfg.is_frozen() - cfg.defrost() - - assert ( - cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0 - ), "Invalid REFERENCE_WORLD_SIZE in config!" 
- scale = num_workers / old_world_size - bs = cfg.SOLVER.IMS_PER_BATCH = int( - round(cfg.SOLVER.IMS_PER_BATCH * scale)) - lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale - max_iter = cfg.SOLVER.MAX_ITER = int( - round(cfg.SOLVER.MAX_ITER / scale)) - warmup_iter = cfg.SOLVER.WARMUP_ITERS = int( - round(cfg.SOLVER.WARMUP_ITERS / scale)) - cfg.SOLVER.STEPS = tuple(int(round(s / scale)) - for s in cfg.SOLVER.STEPS) - cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale)) - cfg.SOLVER.CHECKPOINT_PERIOD = int( - round(cfg.SOLVER.CHECKPOINT_PERIOD / scale)) - cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant - logger = logging.getLogger(__name__) - logger.info( - f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, " - f"max_iter={max_iter}, warmup={warmup_iter}." - ) - - if frozen: - cfg.freeze() - return cfg - - -# Access basic attributes from the underlying trainer -for _attr in ["model", "data_loader", "optimizer"]: - setattr( - OpenDetTrainer, - _attr, - property( - # getter - lambda self, x=_attr: getattr(self._trainer, x), - # setter - lambda self, value, x=_attr: setattr(self._trainer, x, value), - ), - ) diff --git a/spaces/digitalxingtong/Eileen-Bert-Vits2/modules.py b/spaces/digitalxingtong/Eileen-Bert-Vits2/modules.py deleted file mode 100644 index 92e0f32a51c472bfd1659a50a95a95d195281d2b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Eileen-Bert-Vits2/modules.py +++ /dev/null @@ -1,452 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform -from attentions import Encoder - -LRELU_SLOPE = 0.1 - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) 
- - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x -class TransformerCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - n_layers, - n_heads, - p_dropout=0, - filter_channels=0, - mean_only=False, - wn_sharing_parameter=None, - gin_channels = 0 - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = gin_channels) if wn_sharing_parameter is None else wn_sharing_parameter - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/japanese.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = 
[(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/short_audio_transcribe.py b/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/short_audio_transcribe.py deleted file mode 100644 index f1e8b30671f2c2f2fa3c93feb1f4edd3fbe2f545..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Longread-Bert-VITS2/short_audio_transcribe.py +++ /dev/null @@ -1,122 +0,0 @@ -import whisper -import os -import json -import torchaudio -import argparse -import torch - -lang2token = { - 'zh': "[ZH]", - 'ja': "[JA]", - "en": "[EN]", - } -def transcribe_one(audio_path): - # load audio and pad/trim it to fit 30 seconds - audio = whisper.load_audio(audio_path) - audio = whisper.pad_or_trim(audio) - - # make log-Mel spectrogram and move to the same device as the model - mel = whisper.log_mel_spectrogram(audio).to(model.device) - - # detect the spoken language - _, probs = model.detect_language(mel) - print(f"Detected language: {max(probs, key=probs.get)}") - lang = max(probs, key=probs.get) - # decode the audio - options = whisper.DecodingOptions(beam_size=5) - result = whisper.decode(model, mel, options) - - # print the recognized text - print(result.text) - return lang, result.text -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--languages", default="CJE") - parser.add_argument("--whisper_size", default="medium") - args = parser.parse_args() - if args.languages == "CJE": - lang2token = { - 'zh': "[ZH]", - 'ja': "[JA]", - "en": "[EN]", - } - elif args.languages == "CJ": - lang2token = { - 'zh': "[ZH]", - 'ja': "[JA]", - } - elif args.languages == "C": - lang2token = { - 'zh': "[ZH]", - } - 
assert (torch.cuda.is_available()), "Please enable GPU in order to run Whisper!" - model = whisper.load_model(args.whisper_size) - parent_dir = "./custom_character_voice/" - speaker_names = list(os.walk(parent_dir))[0][1] - speaker_annos = [] - total_files = sum([len(files) for r, d, files in os.walk(parent_dir)]) - # resample audios - # 2023/4/21: Get the target sampling rate - with open("./configs/config.json", 'r', encoding='utf-8') as f: - hps = json.load(f) - target_sr = hps['data']['sampling_rate'] - processed_files = 0 - for speaker in speaker_names: - for i, wavfile in enumerate(list(os.walk(parent_dir + speaker))[0][2]): - # try to load file as audio - if wavfile.startswith("processed_"): - continue - try: - wav, sr = torchaudio.load(parent_dir + speaker + "/" + wavfile, frame_offset=0, num_frames=-1, normalize=True, - channels_first=True) - wav = wav.mean(dim=0).unsqueeze(0) - if sr != target_sr: - wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=target_sr)(wav) - if wav.shape[1] / sr > 20: - print(f"{wavfile} too long, ignoring\n") - save_path = parent_dir + speaker + "/" + f"processed_{i}.wav" - torchaudio.save(save_path, wav, target_sr, channels_first=True) - # transcribe text - lang, text = transcribe_one(save_path) - if lang not in list(lang2token.keys()): - print(f"{lang} not supported, ignoring\n") - continue - text = "ZH|" + text + "\n"# - #text = lang2token[lang] + text + lang2token[lang] + "\n" - speaker_annos.append(save_path + "|" + speaker + "|" + text) - - processed_files += 1 - print(f"Processed: {processed_files}/{total_files}") - except: - continue - - # # clean annotation - # import argparse - # import text - # from utils import load_filepaths_and_text - # for i, line in enumerate(speaker_annos): - # path, sid, txt = line.split("|") - # cleaned_text = text._clean_text(txt, ["cjke_cleaners2"]) - # cleaned_text += "\n" if not cleaned_text.endswith("\n") else "" - # speaker_annos[i] = path + "|" + sid + "|" + cleaned_text - # write into annotation - if len(speaker_annos) == 0: - print("Warning: no short audios found, this IS expected if you have only uploaded long audios, videos or video links.") - print("this IS NOT expected if you have uploaded a zip file of short audios. 
Please check your file structure or make sure your audio language is supported.") - with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f: - for line in speaker_annos: - f.write(line) - - # import json - # # generate new config - # with open("./configs/finetune_speaker.json", 'r', encoding='utf-8') as f: - # hps = json.load(f) - # # modify n_speakers - # hps['data']["n_speakers"] = 1000 + len(speaker2id) - # # add speaker names - # for speaker in speaker_names: - # hps['speakers'][speaker] = speaker2id[speaker] - # # save modified config - # with open("./configs/modified_finetune_speaker.json", 'w', encoding='utf-8') as f: - # json.dump(hps, f, indent=2) - # print("finished") diff --git a/spaces/dineshreddy/WALT/mmdet/models/necks/hrfpn.py b/spaces/dineshreddy/WALT/mmdet/models/necks/hrfpn.py deleted file mode 100644 index ed4f194832fc4b6ea77ce54262fb8ffa8675fc4e..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/necks/hrfpn.py +++ /dev/null @@ -1,102 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, caffe2_xavier_init -from torch.utils.checkpoint import checkpoint - -from ..builder import NECKS - - -@NECKS.register_module() -class HRFPN(nn.Module): - """HRFPN (High Resolution Feature Pyramids) - - paper: `High-Resolution Representations for Labeling Pixels and Regions - `_. - - Args: - in_channels (list): number of channels for each branch. - out_channels (int): output channels of feature pyramids. - num_outs (int): number of output stages. - pooling_type (str): pooling for generating feature pyramids - from {MAX, AVG}. - conv_cfg (dict): dictionary to construct and config conv layer. - norm_cfg (dict): dictionary to construct and config norm layer. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. 
- stride (int): stride of 3x3 convolutional layers - """ - - def __init__(self, - in_channels, - out_channels, - num_outs=5, - pooling_type='AVG', - conv_cfg=None, - norm_cfg=None, - with_cp=False, - stride=1): - super(HRFPN, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.with_cp = with_cp - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.reduction_conv = ConvModule( - sum(in_channels), - out_channels, - kernel_size=1, - conv_cfg=self.conv_cfg, - act_cfg=None) - - self.fpn_convs = nn.ModuleList() - for i in range(self.num_outs): - self.fpn_convs.append( - ConvModule( - out_channels, - out_channels, - kernel_size=3, - padding=1, - stride=stride, - conv_cfg=self.conv_cfg, - act_cfg=None)) - - if pooling_type == 'MAX': - self.pooling = F.max_pool2d - else: - self.pooling = F.avg_pool2d - - def init_weights(self): - """Initialize the weights of module.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - caffe2_xavier_init(m) - - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == self.num_ins - outs = [inputs[0]] - for i in range(1, self.num_ins): - outs.append( - F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) - out = torch.cat(outs, dim=1) - if out.requires_grad and self.with_cp: - out = checkpoint(self.reduction_conv, out) - else: - out = self.reduction_conv(out) - outs = [out] - for i in range(1, self.num_outs): - outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) - outputs = [] - - for i in range(self.num_outs): - if outs[i].requires_grad and self.with_cp: - tmp_out = checkpoint(self.fpn_convs[i], outs[i]) - else: - tmp_out = self.fpn_convs[i](outs[i]) - outputs.append(tmp_out) - return tuple(outputs) diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_datasets/academic_test.py b/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_datasets/academic_test.py deleted file mode 100644 index 888ab3d3be5b40e15596086d4af567bd37f6ec05..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_datasets/academic_test.py +++ /dev/null @@ -1,57 +0,0 @@ -# Text Recognition Testing set, including: -# Regular Datasets: IIIT5K, SVT, IC13 -# Irregular Datasets: IC15, SVTP, CT80 - -test_root = 'data/mixture' - -test_img_prefix1 = f'{test_root}/IIIT5K/' -test_img_prefix2 = f'{test_root}/svt/' -test_img_prefix3 = f'{test_root}/icdar_2013/' -test_img_prefix4 = f'{test_root}/icdar_2015/' -test_img_prefix5 = f'{test_root}/svtp/' -test_img_prefix6 = f'{test_root}/ct80/' - -test_ann_file1 = f'{test_root}/IIIT5K/test_label.txt' -test_ann_file2 = f'{test_root}/svt/test_label.txt' -test_ann_file3 = f'{test_root}/icdar_2013/test_label_1015.txt' -test_ann_file4 = f'{test_root}/icdar_2015/test_label.txt' -test_ann_file5 = f'{test_root}/svtp/test_label.txt' -test_ann_file6 = f'{test_root}/ct80/test_label.txt' - -test1 = dict( - type='OCRDataset', - img_prefix=test_img_prefix1, - ann_file=test_ann_file1, - loader=dict( - type='AnnFileLoader', - repeat=1, - file_format='txt', - parser=dict( - type='LineStrParser', - keys=['filename', 'text'], - keys_idx=[0, 1], - separator=' ')), - pipeline=None, - test_mode=True) - -test2 = {key: value for key, value in test1.items()} -test2['img_prefix'] = test_img_prefix2 -test2['ann_file'] = test_ann_file2 - -test3 = {key: value for key, value in test1.items()} -test3['img_prefix'] = test_img_prefix3 
-test3['ann_file'] = test_ann_file3 - -test4 = {key: value for key, value in test1.items()} -test4['img_prefix'] = test_img_prefix4 -test4['ann_file'] = test_ann_file4 - -test5 = {key: value for key, value in test1.items()} -test5['img_prefix'] = test_img_prefix5 -test5['ann_file'] = test_ann_file5 - -test6 = {key: value for key, value in test1.items()} -test6['img_prefix'] = test_img_prefix6 -test6['ann_file'] = test_ann_file6 - -test_list = [test1, test2, test3, test4, test5, test6] diff --git a/spaces/dorkai/text-generation-webui-main/run.py b/spaces/dorkai/text-generation-webui-main/run.py deleted file mode 100644 index 2c966a2f5691c6444c3329365c39e78b74fdbf95..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/run.py +++ /dev/null @@ -1,4 +0,0 @@ -import os -os.system('python download-model.py PygmalionAI/pygmalion-350m --branch main') -# os.system('python download-model.py waifu-workshop/pygmalion-6b --branch original-sharded') -os.system('python server.py --cpu --chat --model pygmalion-350m --no-stream --auto-devices') \ No newline at end of file diff --git a/spaces/duchaba/ct_bactrian/app.py b/spaces/duchaba/ct_bactrian/app.py deleted file mode 100644 index c3567c40bd9a69ec5f07cbe0fce844fb5f1c28ec..0000000000000000000000000000000000000000 --- a/spaces/duchaba/ct_bactrian/app.py +++ /dev/null @@ -1,384 +0,0 @@ - -## required lib, required "pip install" -# import transformers -# import accelerate -import openai -import llama_index -import torch -import cryptography -import cryptography.fernet -## interface libs, required "pip install" -import gradio -import huggingface_hub -import huggingface_hub.hf_api -## standard libs, no need to install -import json -import requests -import time -import os -import random -import re -import sys -import psutil -import threading -import socket -# import PIL -# import pandas -import matplotlib -class HFace_Pluto(object): - # - # initialize the object - def __init__(self, name="Pluto",*args, **kwargs): - super(HFace_Pluto, self).__init__(*args, **kwargs) - self.author = "Duc Haba" - self.name = name - self._ph() - self._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__)) - self._pp("Code name", self.name) - self._pp("Author is", self.author) - self._ph() - # - # define class var for stable division - self._device = 'cuda' - self._steps = [3,8,21,55,89,144] - self._guidances = [1.1,3.0,5.0,8.0,13.0,21.0] - self._xkeyfile = '.xoxo' - self._models = [] - self._seed = 667 # sum of walnut in ascii (or Angle 667) - self._width = 512 - self._height = 512 - self._step = 50 - self._guidances = 7.5 - self._llama_query_engine = None - self._llama_index_doc = None - self._llama_indexes_dict = None - self._llama_query_engines_dict = None - #self._generator = torch.Generator(device='cuda') - self.pipes = [] - self.prompts = [] - self.images = [] - self.seeds = [] - self.fname_id = 0 - self.dname_img = "img_colab/" - self._huggingface_key="gAAAAABkduT-XeiYtD41bzjLtwsLCe9y1FbHH6wZkOZwvLwCrgmOtNsFUPWVqMVG8MumazFhiUZy91mWEnLDLCFw3eKNWtOboIyON6yu4lctn6RCQ4Y9nJvx8wPyOnkzt7dm5OISgFcm" - self._gpt_key="'gAAAAABkgiYGQY8ef5y192LpNgrAAZVCP3bo2za9iWSZzkyOJtc6wykLwGjFjxKFpsNryMgEhCATJSonslooNSBJFM3OcnVBz4jj_lyXPQABOCsOWqZm6W9nrZYTZkJ0uWAAGJV2B8uzQ13QZgI7VCZ12j8Q7WfrIg=='" - self._fkey="=cvsOPRcWD6JONmdr4Sh6-PqF6nT1InYh965mI8f_sef" - self._color_primary = '#2780e3' #blue - self._color_secondary = '#373a3c' #dark gray - self._color_success = '#3fb618' #green - self._color_info = '#9954bb' #purple - 
self._color_warning = '#ff7518' #orange - self._color_danger = '#ff0039' #red - self._color_mid_gray = '#495057' - return - # - # pretty print output name-value line - def _pp(self, a, b,is_print=True): - # print("%34s : %s" % (str(a), str(b))) - x = f'{"%34s" % str(a)} : {str(b)}' - y = None - if (is_print): - print(x) - else: - y = x - return y - # - # pretty print the header or footer lines - def _ph(self,is_print=True): - x = f'{"-"*34} : {"-"*34}' - y = None - if (is_print): - print(x) - else: - y = x - return y - # - # fetch huggingface file - def fetch_hface_files(self, - hf_names, - hf_space="duchaba/monty", - local_dir="/content/"): - f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names)) - try: - for f in hf_names: - lo = local_dir + f - huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f, - use_auth_token=True,repo_type=huggingface_hub.REPO_TYPE_SPACE, - force_filename=lo) - except: - self._pp("*Error", f) - return - # - # - def push_hface_files(self, - hf_names, - hf_space="duchaba/skin_cancer_diagnose", - local_dir="/content/"): - f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names)) - try: - for f in hf_names: - lo = local_dir + f - huggingface_hub.upload_file( - path_or_fileobj=lo, - path_in_repo=f, - repo_id=hf_space, - repo_type=huggingface_hub.REPO_TYPE_SPACE) - except Exception as e: - self._pp("*Error", e) - return - # - def push_hface_folder(self, hf_folder, hf_space_id, hf_dest_folder=None): - api = huggingface_hub.HfApi() - api.upload_folder(folder_path=hf_folder, - repo_id=hf_space_id, - path_in_repo=hf_dest_folder, - repo_type="space") - return - # - # Define a function to display available CPU and RAM - def fetch_system_info(self): - s='' - # Get CPU usage as a percentage - cpu_usage = psutil.cpu_percent() - # Get available memory in bytes - mem = psutil.virtual_memory() - # Convert bytes to gigabytes - mem_total_gb = mem.total / (1024 ** 3) - mem_available_gb = mem.available / (1024 ** 3) - mem_used_gb = mem.used / (1024 ** 3) - # Print the results - s += f"CPU usage: {cpu_usage}%\n" - s += f"Total memory: {mem_total_gb:.2f} GB\n" - s += f"Available memory: {mem_available_gb:.2f} GB\n" - # print(f"Used memory: {mem_used_gb:.2f} GB") - s += f"Memory usage: {mem_used_gb/mem_total_gb:.2f}%\n" - return s - # - def restart_script_periodically(self): - while True: - #random_time = random.randint(540, 600) - random_time = random.randint(15800, 21600) - time.sleep(random_time) - os.execl(sys.executable, sys.executable, *sys.argv) - return - # - def write_file(self,fname, txt): - f = open(fname, "w") - f.writelines("\n".join(txt)) - f.close() - return - # - def fetch_gpu_info(self): - s='' - try: - s += f'Your GPU is the {torch.cuda.get_device_name(0)}\n' - s += f'GPU ready staus {torch.cuda.is_available()}\n' - s += f'GPU allocated RAM: {round(torch.cuda.memory_allocated(0)/1024**3,1)} GB\n' - s += f'GPU reserved RAM {round(torch.cuda.memory_reserved(0)/1024**3,1)} GB\n' - except Exception as e: - s += f'**Warning, No GPU: {e}' - return s - # - def _fetch_crypt(self,is_generate=False): - s=self._fkey[::-1] - if (is_generate): - s=open(self._xkeyfile, "rb").read() - return s - # - def _gen_key(self): - key = cryptography.fernet.Fernet.generate_key() - with open(self._xkeyfile, "wb") as key_file: - key_file.write(key) - return - # - def _decrypt_it(self, x): - y = self._fetch_crypt() - f = cryptography.fernet.Fernet(y) - m = f.decrypt(x) - return m.decode() - # - def _encrypt_it(self, x): - key = self._fetch_crypt() - p = 
x.encode() - f = cryptography.fernet.Fernet(key) - y = f.encrypt(p) - return y - # - def _login_hface(self): - huggingface_hub.login(self._decrypt_it(self._huggingface_key), - add_to_git_credential=True) # non-blocking login - self._ph() - return - # - def _fetch_version(self): - s = '' - # print(f"{'torch: 2.0.1':<25} Actual: {torch.__version__}") - # print(f"{'transformers: 4.29.2':<25} Actual: {transformers.__version__}") - s += f"{'openai: 0.27.7,':<28} Actual: {openai.__version__}\n" - s += f"{'huggingface_hub: 0.14.1,':<28} Actual: {huggingface_hub.__version__}\n" - s += f"{'gradio: 3.32.0,':<28} Actual: {gradio.__version__}\n" - s += f"{'cryptography: 41.0.1,':<28} Actual: {cryptography.__version__}\n" - s += f"{'llama_index: 0.6.21.post1,':<28} Actual: {llama_index.__version__}\n" - return s - # - def _fetch_host_ip(self): - s='' - hostname = socket.gethostname() - ip_address = socket.gethostbyname(hostname) - s += f"Hostname: {hostname}\n" - s += f"IP Address: {ip_address}\n" - return s - # - def _setup_openai(self,key=None): - if (key is None): - key = self._decrypt_it(self._gpt_key) - # - openai.api_key = key - os.environ["OPENAI_API_KEY"] = key - return - # - def _fetch_index_files(self,llama_ix): - res = [] - x = llama_ix.ref_doc_info - for val in x.values(): - jdata = json.loads(val.to_json()) - try: - fname = jdata['extra_info']['file_name'] - res.append(fname) - except: - fname = jdata['metadata']['file_name'] - res.append(fname) - # remove dublication name - res = list(set(res)) - return res - # - def _fetch_dir_name(self,directory): - dname=[] - for name in os.listdir(directory): - if os.path.isdir(os.path.join(directory, name)): - if (name[0] != '.'): - print(name) - dname.append(name) - dname.sort() - return dname -# add module/method -# -import functools -def add_method(cls): - def decorator(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - return func(*args, **kwargs) - setattr(cls, func.__name__, wrapper) - return func # returning func means func can still be used normally - return decorator -# -monty = HFace_Pluto("Monty") -monty._login_hface() -print(monty._fetch_version()) -monty._ph() -print(monty.fetch_system_info()) -monty._ph() -print(monty.fetch_gpu_info()) -monty._ph() -print(monty._fetch_host_ip()) -monty._ph() -print('setup openai for DaVinci model') -monty._setup_openai() - -monty.github_name = 'sandbox_llama_data' -monty._llama_root_path = f'./{monty.github_name}/' -monty.llama_collection_name = monty._fetch_dir_name(monty._llama_root_path) - -@add_method(HFace_Pluto) -def gen_llama_collection_dict(self, collec): - is_index = [] - index_token=[] - query_engine=[] - for i in collec: - k = f'{self._llama_root_path}{i}/transformer_token_storage/index_store.json' - is_index.append(os.path.isfile(k)) - index_token.append(False) - query_engine.append(False) - d = {collec: {"is_index": is_index, "index_token": index_token, "query_engine": query_engine} - for collec, is_index, index_token, query_engine in zip(collec, is_index, index_token, query_engine)} - return d - -monty.llama_collection_dict = monty.gen_llama_collection_dict(monty.llama_collection_name) - -@add_method(HFace_Pluto) -def _load_llama_index(self,vindex='vector_index',vpath='./index_storage'): - idx = None - try: - storage_context = llama_index.StorageContext.from_defaults(persist_dir=vpath) - # load index - idx = llama_index.load_index_from_storage(storage_context, index_id=vindex) - print(f'Index doc are: {self._fetch_index_files(idx)}') - except Exception as e: - 
print(f'**Error: can not load index, check the index_storage directory: {vpath}') - print('If do not have index tokens then run the .gen_llama_index() function') - print(f'Exception: {e}') - return idx -# -@add_method(HFace_Pluto) -def load_llama_engine(self,collec_dict): - for key, value in collec_dict.items(): - if (collec_dict[key]['index_token'] is None) or (collec_dict[key]['index_token'] is False): - p = f'{self._llama_root_path}{key}/transformer_token_storage' - # print('A1: ', p) - idx = self._load_llama_index(vpath=p) - collec_dict[key]['index_token'] = idx - # - collec_dict[key]['query_engine'] = collec_dict[key]['index_token'].as_query_engine() - # print('key: ', collec_dict[key]['index_token']) - return - -monty.load_llama_engine(monty.llama_collection_dict) - -@add_method(HFace_Pluto) -def ask_me(self, p, ll_engine_name, ll_member): - engine = self.llama_collection_dict[ll_engine_name]['query_engine'] - px = f'My name is {ll_member}. Answer the following request. Request: {p}' - resp = engine.query(px) - return resp - -in_box = [gradio.Textbox(lines=1, label="Your AI request", placeholder="Select LLM and member first") - ,gradio.Radio(["bank_of_america", "ford", "humana", "stagwell"], label="Select your LLM", value='bank_of_america', info="Chose Fine-tune model") - ,gradio.Radio(["Duc", "Prashant", "Dicam", "Kenton", "Joe", "Amanda", "Pradeep", "Stephany"], label="Login Member", value='Duc', info="Member profile: Duc, Prashant is BofA. Dicam, Kenton is Ford. Joe, Amanda is Humana. Pradeep, Stephany is Stagwell.") - ] -out_box = [gradio.Textbox(label="LLM Bactrian response:") - # ,gradio.Textbox(lines=4, label="Response Raw JSON Data:") - ] -# - -title = "C&T Bactrian LLM Model" -desc = '*Note: This model is the fine-tuned DaVinci-Text-003 LLM using Llama Index technique.' -# arti = f'
<br>&emsp;&bull; The documents for fine-tuning are:<br>&emsp;&bull; {monty.self._fetch_index_files(monty._llama_index_doc)}' -arti = '**Llama Index files:<br>&emsp;&emsp;' -for key, value in monty.llama_collection_dict.items(): - try: - idx = monty.llama_collection_dict[key]['index_token'] - arti += f'<br>&emsp;&bull; {key} Index Doc: {monty._fetch_index_files(idx)}<br>&emsp;&bull; ' - except Exception as e: - arti += f'<br>&emsp;&bull; {key} Index Doc: *Error, {e}<br>&emsp;&bull; ' -# -arti += '<br>
    ' -exp = [ - ['Write about Bank of America investment plan according to my income, and display the accuracy. Display my age.', 'bank_of_america', 'Duc'], - ['What would be a best new Ford car for me? Display my gender, and give me the accuracy of your answer.', 'ford', 'Dicam'], - ['What is the benefits of the Humana Gold Plus plan? Display my gender, name, and give me the accuracy of your answer.', 'humana', 'Amanda'], - ['From 2019 to 2023, Write a summary on Stagwell growth and challenges. Display the reference to on the answer.', 'stagwell', 'Pradeep'] - ] -flag_opt = [': Good', ': Bad'] -flag_dir = './user_feed_back' - -gradio.Interface(fn=monty.ask_me, - inputs=in_box, - outputs=out_box, - examples=exp, - title=title, - description=desc, - # allow_flagging='manual', # or 'auto' or 'never' - # flagging_dir=flag_dir, - # flagging_options=flag_opt, - article=arti).launch() diff --git a/spaces/dvc890/go-chatgpt-api/api/platform/access_token.go b/spaces/dvc890/go-chatgpt-api/api/platform/access_token.go deleted file mode 100644 index 4add2c27875176ddd2bbd0b465131ac9b9f38ded..0000000000000000000000000000000000000000 --- a/spaces/dvc890/go-chatgpt-api/api/platform/access_token.go +++ /dev/null @@ -1,116 +0,0 @@ -package platform - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/url" - "strings" - - "github.com/linweiyuan/go-chatgpt-api/api" - - http "github.com/bogdanfinn/fhttp" -) - -//goland:noinspection GoUnhandledErrorResult,GoErrorStringFormat,GoUnusedParameter -func (userLogin *UserLogin) GetAuthorizedUrl(csrfToken string) (string, int, error) { - urlParams := url.Values{ - "client_id": {platformAuthClientID}, - "audience": {platformAuthAudience}, - "redirect_uri": {platformAuthRedirectURL}, - "scope": {platformAuthScope}, - "response_type": {platformAuthResponseType}, - } - req, _ := http.NewRequest(http.MethodGet, platformAuth0Url+urlParams.Encode(), nil) - req.Header.Set("Content-Type", api.ContentType) - req.Header.Set("User-Agent", api.UserAgent) - resp, err := userLogin.client.Do(req) - if err != nil { - return "", http.StatusInternalServerError, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", resp.StatusCode, errors.New(api.GetAuthorizedUrlErrorMessage) - } - - return resp.Request.URL.String(), http.StatusOK, nil -} - -func (userLogin *UserLogin) GetState(authorizedUrl string) (string, int, error) { - split := strings.Split(authorizedUrl, "=") - return split[1], http.StatusOK, nil -} - -//goland:noinspection GoUnhandledErrorResult,GoErrorStringFormat -func (userLogin *UserLogin) CheckUsername(state string, username string) (int, error) { - formParams := fmt.Sprintf( - "state=%s&username=%s&js-available=true&webauthn-available=true&is-brave=false&webauthn-platform-available=false&action=default", - state, - username, - ) - req, err := http.NewRequest(http.MethodPost, api.LoginUsernameUrl+state, strings.NewReader(formParams)) - req.Header.Set("Content-Type", api.ContentType) - req.Header.Set("User-Agent", api.UserAgent) - resp, err := userLogin.client.Do(req) - if err != nil { - return http.StatusInternalServerError, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return resp.StatusCode, errors.New(api.EmailInvalidErrorMessage) - } - - return http.StatusOK, nil -} - -//goland:noinspection GoUnhandledErrorResult,GoErrorStringFormat -func (userLogin *UserLogin) CheckPassword(state string, username string, password string) (string, int, error) { - formParams := fmt.Sprintf( - 
"state=%s&username=%s&password=%s&action=default", - state, - username, - password, - ) - req, err := http.NewRequest(http.MethodPost, api.LoginPasswordUrl+state, strings.NewReader(formParams)) - req.Header.Set("Content-Type", api.ContentType) - req.Header.Set("User-Agent", api.UserAgent) - resp, err := userLogin.client.Do(req) - if err != nil { - return "", http.StatusInternalServerError, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", resp.StatusCode, errors.New(api.EmailOrPasswordInvalidErrorMessage) - } - - return resp.Request.URL.Query().Get("code"), http.StatusOK, nil -} - -//goland:noinspection GoUnhandledErrorResult,GoErrorStringFormat -func (userLogin *UserLogin) GetAccessToken(code string) (string, int, error) { - jsonBytes, _ := json.Marshal(GetAccessTokenRequest{ - ClientID: platformAuthClientID, - Code: code, - GrantType: platformAuthGrantType, - RedirectURI: platformAuthRedirectURL, - }) - req, err := http.NewRequest(http.MethodPost, getTokenUrl, strings.NewReader(string(jsonBytes))) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", api.UserAgent) - resp, err := userLogin.client.Do(req) - if err != nil { - return "", http.StatusInternalServerError, err - } - - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return "", resp.StatusCode, errors.New(api.GetAccessTokenErrorMessage) - } - - data, _ := io.ReadAll(resp.Body) - return string(data), http.StatusOK, nil -} diff --git a/spaces/eson/tokenizer-arena/vocab/moss/test_zh_coding_len.py b/spaces/eson/tokenizer-arena/vocab/moss/test_zh_coding_len.py deleted file mode 100644 index 024a86ae91a7677b7bd2a14b762c3a04def908f9..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/moss/test_zh_coding_len.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -1. jd_vocab_tokens的中文: - - - -2. 中文标点 - - -3. 全中文(单字) unicode - - -4. 
全中文() -词典大小:46145。其中 中文汉字数:{'total': 25359, '中文单字': 5089, '中文多字': 20270}, 中文标点数: 266 -""" - -from collections import Counter -from transformers import AutoTokenizer -from data_sample.oov_base import jd_vocab_tokens -from utils.text_util import is_chinese, has_chinese -from zhon.hanzi import punctuation as zh_punc - -tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True) -# tokenizer = Tokenizer.from_file("../gpt_neox_chinese/20B_tokenizer_chinese.json") -vocab = tokenizer.get_vocab() - -def zh_iterator(): - for idx in range(ord(u'\u4e00'), ord(u'\u9fa5')): - yield (chr(idx)) - - -def test_coding_length(vocab, filter=None): - all_length = [] - for word in vocab: - if len(word) > 1: - continue - if filter is not None and filter(word): - continue - tokens = tokenizer.encode(word) - all_length.append(len(tokens)) - # if len(tokens.ids) > 1: - if len(tokens.ids) == 1: - print(word, tokens.ids) - - print("编码长度统计:", Counter(all_length)) - print("平均编码长度:", sum(all_length)/len(all_length)) - - -def has_zh_char(text): - return any(ch in zh_punc for ch in text) - - -def iter_vocab(): - - f_out = open("vocab.zh.txt", "w", encoding="utf-8") - zh_token_count = {"total": 0, "中文单字": 0, "中文多字": 0} - zh_symbol_count = 0 - for idx in range(len(vocab)): - decode_str = tokenizer.decode([idx]) - if has_chinese(decode_str): - zh_token_count["total"] += 1 - if len(decode_str.strip()) > 1: - zh_token_count["中文多字"] += 1 - else: - zh_token_count["中文单字"] += 1 - - - f_out.write("%d\t%s\t中文汉字\n" % (idx, decode_str)) - - - elif has_zh_char(decode_str): - zh_symbol_count += 1 - f_out.write("%d\t%s\t中文标点\n" % (idx, decode_str)) - - print("词典大小:%d。其中 中文汉字数:%s, 中文标点数: %d" % (len(vocab), str(zh_token_count), zh_symbol_count)) - - -if __name__ == "__main__": - - # test_coding_length(jd_vocab_tokens, filter=lambda k: not is_chinese(k)) - # test_coding_length(zh_punc) - # test_coding_length(zh_iterator()) - iter_vocab() \ No newline at end of file diff --git a/spaces/evawade17/acne_detector/README.md b/spaces/evawade17/acne_detector/README.md deleted file mode 100644 index b3cfb33784f911c9891838cda3d5743c45f2bbbf..0000000000000000000000000000000000000000 --- a/spaces/evawade17/acne_detector/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Acne Detector -emoji: 📈 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.5 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/exbert-project/exbert/client/src/ts/vis/AttentionConnector.ts b/spaces/exbert-project/exbert/client/src/ts/vis/AttentionConnector.ts deleted file mode 100644 index 3f5fe7b92ca0a4dc4fc54c2936e284530c4d6853..0000000000000000000000000000000000000000 --- a/spaces/exbert-project/exbert/client/src/ts/vis/AttentionConnector.ts +++ /dev/null @@ -1,280 +0,0 @@ -import * as d3 from "d3"; -import 'd3-selection-multi' -import { D3Sel } from "../etc/Util"; -import { Edge, EdgeData } from "./EdgeConnector" -import { VComponent } from "./VisComponent"; -import { SimpleEventHandler } from "../etc/SimpleEventHandler"; -import * as tp from "../etc/types" - -export type AttentionData = number[][] - -export const scaleLinearWidth = opacity => 5 * opacity^0.33; - -export class AttentionGraph extends VComponent{ - css_name = ''; - _current: {}; - - _data: AttentionData; // The passed data - edgeData: EdgeData; // A wrapper around _data. 
User should not mind - plotData: Edge[]; // Needed for plotting - - /** COMPONENTS - * Expose the components belonging to the class as properties of the class. - * This is useful to create methods that specifically modify a single part or component without having to reselect it. - * Makes for more responsive applications - * */ - svg: D3Sel; - graph: D3Sel; - - // The below components require data - paths: D3Sel; - opacityScales: d3.ScaleLinear[]; - linkGen: d3.Link - - // OPTIONS WITH DEFAULTS - _threshold = 0.7; // Accumulation threshold. Between 0-1 - normBy: tp.NormBy - - static events = {} // No events needed for this one - - options = { - boxheight: 26, // The height of the div boxes around the SVG element - height: 500, - width: 200, - offset: 0, // Should I offset the left side by 1 or not? - } - - constructor(d3Parent: D3Sel, eventHandler?: SimpleEventHandler, options: {} = {}) { - super(d3Parent, eventHandler) - this.superInitSVG(options) - this._init() - } - - _init() { - this.svg = this.parent; - this.graph = this.svg.selectAll(`.atn-curve`); - this.linkGen = d3.linkHorizontal() - .x(d => d[0]) - .y(d => d[1]); - } - - // Define whether to use the 'j' or 'i' attribute to calculate opacities - private scaleIdx(): "i" | "j" { - switch (this.normBy) { - case tp.NormBy.COL: - return 'j' - case tp.NormBy.ROW: - return 'i' - case tp.NormBy.ALL: - return 'i' - - } - - } - - /** - * Create connections between locations of the SVG using D3's linkGen - */ - private createConnections() { - const self = this; - const op = this.options; - if (this.paths) { - this.paths.attrs({ - 'd': (d, i) => { - const data: { source: [number, number], target: [number, number] } = - { - source: [0, op.boxheight * (d.i + 0.5 + op.offset)], - target: [op.width, op.boxheight * (d.j + 0.5)] // + 2 allows small offset - }; - return this.linkGen(data); - }, - 'class': 'atn-curve' - }) - .attr("src-idx", (d, i) => d.i) - .attr("target-idx", (d, i) => d.j); - } - } - - /** - * Change the height of the SVG - */ - private updateHeight() { - const op = this.options; - if (this.svg != null) { - this.svg.attr("height", this.options.height + (op.offset * this.options.boxheight)) - } - return this; - } - - /** - * Change the width of the SVG - */ - private updateWidth() { - if (this.svg != null) { - this.svg.attr("width", this.options.width) - } - return this; - } - - /** - * Change the Opacity of the lines according to the value of the data - */ - private updateOpacity() { - const self = this; - if (this.paths != null) { - // paths.transition().duration(500).attr('opacity', (d) => { - this.paths.attr('opacity', (d) => { - const val = this.opacityScales[d[self.scaleIdx()]](d.v); - return val; - }) - this.paths.attr('stroke-width', (d) => { - const val = this.opacityScales[d[self.scaleIdx()]](d.v); - return scaleLinearWidth(val) //5 * val^0.33; - }) - } - return this; - } - - /** - * Rerender the graph in the event that the data changes - */ - private updateData() { - if (this.graph != null) { - d3.selectAll(".atn-curve").remove(); - - const data = this.plotData - - this.paths = this.graph - .data(data) - .join('path'); - - this.createConnections(); - this.updateOpacity(); - - return this; - } - } - - /** - * Scale the opacity according to the values of the data, from 0 to max of contained data - * Normalize by each source target, or across the whole - */ - private createScales = () => { - this.opacityScales = []; - let arr = [] - - // Group normalization - switch (this.normBy){ - case tp.NormBy.ROW: - arr = 
this.edgeData.extent(1); - this.opacityScales = []; - arr.forEach((v, i) => { - (this.opacityScales as d3.ScaleLinear[]).push( - d3.scaleLinear() - .domain([0, v[1]]) - .range([0, 0.9]) - ) - }) - break; - case tp.NormBy.COL: - arr = this.edgeData.extent(0); - this.opacityScales = []; - arr.forEach((v, i) => { - (this.opacityScales as d3.ScaleLinear[]).push( - d3.scaleLinear() - .domain([0, v[1]]) - .range([0, 0.9]) - ) - }) - break; - case tp.NormBy.ALL: - const maxIn = d3.max(this.plotData.map((d) => d.v)) - for (let i = 0; i < this._data.length; i++) { - this.opacityScales.push(d3.scaleLinear() - .domain([0, maxIn]) - .range([0, 1])); - } - break; - default: - console.log("Nor norming specified"); - break; - } - } - - /** - * Access / modify the data in a D3 style way. If modified, the component will update just the part that is needed to be updated - */ - data(): AttentionData - data(value: AttentionData): this - data(value?) { - if (value == null) { - return this._data; - } - - this._data = value; - this.edgeData = new EdgeData(value); - this.plotData = this.edgeData.format(this._threshold); - this.createScales(); - this.updateData(); - return this; - } - - /** - * Access / modify the height in a D3 style way. If modified, the component will update just the part that is needed to be updated - */ - height(): number - height(value: number): this - height(value?) { - if (value == null) { - return this.options.height - } - - this.options.height = value - this.updateHeight() - return this; - } - - /** - * Access / modify the width in a D3 style way. If modified, the component will update just the part that is needed to be updated - */ - width(): number - width(value: number): this - width(value?: number): this | number { - if (value == null) { - return this.options.width; - } - this.options.width = value; - this.updateWidth(); - return this; - } - - /** - * Access / modify the threshold in a D3 style way. If modified, the component will update just the part that is needed to be updated - */ - threshold(): number - threshold(value: number): this - threshold(value?) { - if (value == null) { - return this._threshold; - } - - this._threshold = value; - this.plotData = this.edgeData.format(this._threshold); - this.createScales(); - this.updateData(); - return this; - } - - _wrangle(data: AttentionData) { - return data; - } - - _render(data: AttentionData) { - this.svg.html('') - this.updateHeight(); - this.updateWidth(); - - this.updateData(); - return this; - } -} \ No newline at end of file diff --git a/spaces/facebook/MusicGen/audiocraft/grids/diffusion/_explorers.py b/spaces/facebook/MusicGen/audiocraft/grids/diffusion/_explorers.py deleted file mode 100644 index 0bf4ca57b63f5f9308bd1178ddbde5d8f06748e5..0000000000000000000000000000000000000000 --- a/spaces/facebook/MusicGen/audiocraft/grids/diffusion/_explorers.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import treetable as tt - -from .._base_explorers import BaseExplorer - - -class DiffusionExplorer(BaseExplorer): - eval_metrics = ["sisnr", "visqol"] - - def stages(self): - return ["train", "valid", "valid_ema", "evaluate", "evaluate_ema"] - - def get_grid_meta(self): - """Returns the list of Meta information to display for each XP/job. 
- """ - return [ - tt.leaf("index", align=">"), - tt.leaf("name", wrap=140), - tt.leaf("state"), - tt.leaf("sig", align=">"), - ] - - def get_grid_metrics(self): - """Return the metrics that should be displayed in the tracking table. - """ - return [ - tt.group( - "train", - [ - tt.leaf("epoch"), - tt.leaf("loss", ".3%"), - ], - align=">", - ), - tt.group( - "valid", - [ - tt.leaf("loss", ".3%"), - # tt.leaf("loss_0", ".3%"), - ], - align=">", - ), - tt.group( - "valid_ema", - [ - tt.leaf("loss", ".3%"), - # tt.leaf("loss_0", ".3%"), - ], - align=">", - ), - tt.group( - "evaluate", [tt.leaf("rvm", ".4f"), tt.leaf("rvm_0", ".4f"), - tt.leaf("rvm_1", ".4f"), tt.leaf("rvm_2", ".4f"), - tt.leaf("rvm_3", ".4f"), ], align=">" - ), - tt.group( - "evaluate_ema", [tt.leaf("rvm", ".4f"), tt.leaf("rvm_0", ".4f"), - tt.leaf("rvm_1", ".4f"), tt.leaf("rvm_2", ".4f"), - tt.leaf("rvm_3", ".4f")], align=">" - ), - ] diff --git a/spaces/falterWliame/Face_Mask_Detection/Daz 3D Poser - The Kids 4 Pro Bundle Full Full Version [PATCHED].md b/spaces/falterWliame/Face_Mask_Detection/Daz 3D Poser - The Kids 4 Pro Bundle Full Full Version [PATCHED].md deleted file mode 100644 index a68c42883bd1fa525c0f08b305c7d67a8fecaf3e..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Daz 3D Poser - The Kids 4 Pro Bundle Full Full Version [PATCHED].md +++ /dev/null @@ -1,12 +0,0 @@ -

    Daz 3D Poser - The Kids 4 Pro Bundle full full version


    Downloadhttps://urlca.com/2uDcaB



    - -The Kids 4 is the obvious choice for all your gen 4 renders. . . Compatible figures: The Kids 4. Compatible software: Poser. Kids 4 Pro set. With this kit you will be able to create more realistic detailed models in Poser -At this level, you will be able to create models for all your Poser projects as well as edit them. -This kit is perfect for working with Poser Pro.It includes 2 versions of the set: Plus and Pro. -If you purchase the Pro kit, then all the necessary parts for the Poser Pro will be included. -The Poser Pro Kit includes the following components and materials: -• 20 standard Poser Pro parts -• 45 parts 8a78ff9644
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Desktop Mining Bitcoin Monero Ethereum V 4.0.0.md b/spaces/falterWliame/Face_Mask_Detection/Desktop Mining Bitcoin Monero Ethereum V 4.0.0.md deleted file mode 100644 index 251d95065f7411018bea21cb2367a3955f6f286c..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Desktop Mining Bitcoin Monero Ethereum V 4.0.0.md +++ /dev/null @@ -1,46 +0,0 @@ -

    Desktop Mining Bitcoin, Monero, Ethereum, V 4.0.0


    DOWNLOAD ››››› https://urlca.com/2uDcd3



    - -We create an instant desktop wallet that allows you to access, monitor and control your coins 24/7. - -It’s the fastest to setup and is extremely easy to use. - -Bounties the only desktops mining software that offers you the most profitability, stability and availability. - -Bounties aims to eliminate downtime from being a part of your hardware mining experience and have the best version of the Bounties wallet on the market. - -Have your own private mine directly from your desktop in minutes. - -We don’t charge any service fees. - -Never worry about having your wallet/hardware offline for updates, Bounties is always up-to-date! - -You are able to set up and monitor your mining from your computer or smartphone. - -You can send payments through your chosen payment processor (WU) Bounties allows you to do this directly from the coin. - -Bounties comes with a pre-installed WU app that allows you to make payments via cash, check, credit card or a direct bank transfer. - -Bounties can sync your wallets 24/7 which will allow you to see all transactions and balance. - -This is extremely helpful if you are mining more than one coin or you want to see your wallets balance and total current income. - -Bounties lets you monitor your mining in real-time and record your daily statistics. - -Bounties offers every miner a unique watch list that they can use to monitor their hardware. - -Bounties has one of the best monitoring features in the industry. Bounties will allow you to see the miner status of your hardware in real-time, quickly shut down your miner and turn it back on. - -Bounties has great feedback on your hashrate and allows you to monitor your hashrate in real-time. - -You can also download statistics directly from Bounties. You can even view these statistics offline on any computer. - -Bounties will let you know if your miner is down and whether or not you need to replace it. - -You can contact Bounties support via ticket or chat with them. - -Once you have received a refund for your hardware/warranty you will need to send them the required information (serial numbers) to get a new hardware warranty. - -Not only do we create a desktop wallet, we also provide 24/7 technical support via phone and email. 4fefd39f24
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Prince Of Persia 720p Dual Audio 2021.md b/spaces/falterWliame/Face_Mask_Detection/Prince Of Persia 720p Dual Audio 2021.md deleted file mode 100644 index 72aa7e6d5f035dbeb6225f3284ff29d093cac2fd..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Prince Of Persia 720p Dual Audio 2021.md +++ /dev/null @@ -1,7 +0,0 @@ -
    -

    set a thousand years after the first game, divinity: original sin ii presents a darker, more grounded narrative and expands on the tactical combat system of its award-winning predecessor. in divinity: original sin, you were on a quest for the forbidden source magic. now, you yourself are a sourcerer: a dangerously powerful individual whose abilities summon creatures from the encroaching void. the divine is dead and the void is everywhere. sourcerers are blamed and the divine order is leading the charge against the threat: you. captured and sent to fort joy, you will be cured of your powers -- no matter the cost. but the order has secrets of its own that may cast doubt on its so-called holy mission. as you escape from fort joy, you realize that if the world remains godless, it will be consumed by the void.

    -

    this is the hindi dub of the film prince of persia: the sands of time. the film is a remake of the original game, which was based on the graphic adventure game of the same name by jordan mechner, and the film was released in india in december 2010.

    -

    Prince Of Persia 720p Dual Audio


    Download Zip >>>>> https://urlca.com/2uDdHV



    -

    widescreen theatrical 2.40:1. source: theatrical. for more information please visit - hindi dual audio bluray 720p.zip details. 720p hd bluray 720p dual audio hindi bollywood movies free download for windows. there are many great stars featured in this movie some are jake gyllenhaal, gemma arterton, ben kingsley, and many other hollywood stars. the film is a remake of the original game, which was based on the graphic adventure game of the same name by jordan mechner, and the film was released in india in december 2010.

    899543212b
    -
    -
    \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Project Zomboid Build 39.67.5 [Win64] DRM-PATCHED Free Game Hack.md b/spaces/falterWliame/Face_Mask_Detection/Project Zomboid Build 39.67.5 [Win64] DRM-PATCHED Free Game Hack.md deleted file mode 100644 index 1ece6ed8223727ee7212ed26869c1127b7b211b3..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Project Zomboid Build 39.67.5 [Win64] DRM-PATCHED Free Game Hack.md +++ /dev/null @@ -1,9 +0,0 @@ -

    Project Zomboid Build 39.67.5 [Win64] DRM-Free game hack


    Downloadhttps://urlca.com/2uDd0i



    -
    -May 25, 2559 B.C. - Description: Project Zomboid is a zombie apocalypse simulator with . The game has a new server settings editor. . Ubuntu 17.04 x64 gnome with gdm2 on qt5 -May 25, 2559 BC - Description: Project Zomboid is a zombie apocalypse simulator with . The game has a new server settings editor. . -Ubuntu 17.04 x64 gnome with gdm2 on qt5 -May 25, 2559 BC - Description: Project Zomboid is a zombie apocalypse simulator with . The game has a new server settings editor. . 8a78ff9644
    -
    -
    -

    diff --git a/spaces/fatiXbelha/sd/Candy Crush Saga MOD Download Unlock All Levels and Features with One Click.md b/spaces/fatiXbelha/sd/Candy Crush Saga MOD Download Unlock All Levels and Features with One Click.md deleted file mode 100644 index d2be37e55afcff18a8997b1dc317c45b94c51558..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Candy Crush Saga MOD Download Unlock All Levels and Features with One Click.md +++ /dev/null @@ -1,122 +0,0 @@ -
    -

    How to Download Mod Candy Crush Saga

    -

    If you are a fan of match 3 puzzle games, you have probably heard of Candy Crush Saga, one of the most popular and addictive games in this genre. But did you know that you can download a modded version of Candy Crush Saga that gives you unlimited lives, boosters, moves, and more? In this article, we will show you how to download and install mod candy crush saga on your Android device.

    -

    What is Candy Crush Saga?

    -

    A popular match 3 puzzle game

    -

Candy Crush Saga is a game developed by King, a leading company in the mobile gaming industry. The game was released in 2012 and has since become one of the most downloaded and played games in the world. The game has thousands of levels, each with different goals and challenges. The basic gameplay involves matching three or more candies of the same color to clear them from the board and earn points. You can also create special candies by matching four or more candies or combining two special candies. These special candies can help you clear more candies, break obstacles, or trigger other effects.

    -

    download mod candy crush saga


    DOWNLOAD →→→ https://urllie.com/2uNHEp



    -

    Features and benefits of playing Candy Crush Saga

    -

    Candy Crush Saga is not only fun and entertaining, but also has many features and benefits that make it appealing to players of all ages and preferences. Some of these features and benefits are:

    -
      -
    • It is free to play, but you can also purchase in-game items such as extra moves, lives, or boosters to help you progress faster.
    • -
    • It has stunning graphics, animations, and sound effects that create a colorful and immersive experience.
    • -
    • It has a variety of game modes and puzzles that test your skills and strategy.
    • -
    • It has a social aspect that allows you to connect with your friends and other players through Facebook or King.com. You can compare your scores, send and receive lives, or compete in leaderboards and events.
    • -
    • It is updated regularly with new levels, features, and events to keep you engaged and challenged.
    • -
    -

    What is a mod apk?

    -

    A modified version of an original app

    -

A mod apk is a version of an original app that has been modified by someone other than the developer. It usually has changes or additions that are not present in the original app. For example, a mod apk may have unlimited resources, unlocked features, removed ads, or added functionality.

    -

    Advantages and disadvantages of using a mod apk

    -

    Using a mod apk can have some advantages and disadvantages depending on your purpose and preference. Some of these advantages and disadvantages are:

    -
      -
    • The main advantage of using a mod apk is that it can give you access to features or resources that are otherwise unavailable or limited in the original app. This can make the game more enjoyable, easier, or interesting.
    • -
    • The main disadvantage of using a mod apk is that it can pose some risks to your device or account. A mod apk may contain malware, viruses, or spyware that can harm your device or steal your data. A mod apk may also violate the terms of service or policies of the original app or platform, which can result in your account being banned or suspended.
    • -
    -

    How to download and install mod candy crush saga?

    -

    Find a reliable source for the mod apk file

    -

    The first step to download and install mod candy crush saga is to find a reliable source for the mod apk file. There are many websites that offer mod apk files for various apps and games, but not all of them are trustworthy or safe. Some websites may have fake or outdated mod apk files that may not work or may harm your device. Therefore, you should always do some research and check the reviews and ratings of the website before downloading any mod apk file. You can also use some antivirus or security apps to scan the mod apk file before installing it.

    -

    Enable unknown sources on your device

    -

    The second step to download and install mod candy crush saga is to enable unknown sources on your device. This is because mod apk files are not downloaded from the official Google Play Store, but from other sources that are not verified by Google. Therefore, you need to allow your device to install apps from unknown sources. To do this, you can follow these steps:

    -
      -
    1. Go to your device settings and look for security or privacy options.
    2. -
    3. Find the option that says unknown sources or install unknown apps and toggle it on.
    4. -
    5. A warning message may appear, telling you the risks of installing apps from unknown sources. Tap on OK or Allow to proceed.
    6. -
    -

    Download and install the mod apk file

    -

    The third step to download and install mod candy crush saga is to download and install the mod apk file. To do this, you can follow these steps:

    -

    download candy crush saga mod apk unlimited lives
    -download candy crush saga mod apk latest version
    -download candy crush saga mod apk happymod
    -download candy crush saga mod apk unlocked all levels
    -download candy crush saga mod apk for android
    -download candy crush saga mod apk offline
    -download candy crush saga mod apk with facebook connect
    -download candy crush saga mod apk no root
    -download candy crush saga mod apk free shopping
    -download candy crush saga mod apk 2023
    -download candy crush saga hack mod apk
    -download candy crush saga mega mod apk
    -download candy crush saga super mod apk
    -download candy crush saga premium mod apk
    -download candy crush saga gold bars mod apk
    -download candy crush saga unlimited boosters mod apk
    -download candy crush saga unlimited moves mod apk
    -download candy crush saga unlimited everything mod apk
    -download candy crush saga full version mod apk
    -download candy crush saga cracked mod apk
    -how to download candy crush saga mod apk
    -where to download candy crush saga mod apk
    -best site to download candy crush saga mod apk
    -safe way to download candy crush saga mod apk
    -easy steps to download candy crush saga mod apk
    -download and install candy crush saga mod apk
    -download and play candy crush saga mod apk
    -download and update candy crush saga mod apk
    -download and enjoy candy crush saga mod apk
    -download and share candy crush saga mod apk
    -free download of candy crush saga mod apk
    -direct download of candy crush saga mod apk
    -fast download of candy crush saga mod apk
    -secure download of candy crush saga mod apk
    -reliable download of candy crush saga mod apk
    -download link of candy crush saga mod apk
    -download page of candy crush saga mod apk
    -download site of candy crush saga mod apk
    -download source of candy crush saga mod apk
    -download file of candy crush saga mod apk
    -why download candy crush saga mod apk
    -what is candy crush saga mod apk
    -benefits of downloading candy crush saga mod apk
    -features of downloading candy crush saga mod apk
    -advantages of downloading candy crush saga mod apk
    -disadvantages of downloading candy crush saga mod apk
    -risks of downloading candy crush saga mod apk
    -reviews of downloading candy crush saga mod apk
    -ratings of downloading candy crush saga mod apk

    -
      -
    1. Open your browser and go to the website where you found the mod apk file for candy crush saga.
    2. -
    3. Tap on the download button or link and wait for the file to be downloaded.
    4. -
    5. Once the file is downloaded, go to your file manager and locate the mod apk file. It should be in your downloads folder or in a folder named after the website.
    6. -
    7. Tap on the mod apk file and a pop-up window will appear, asking you to install the app. Tap on Install and wait for the installation to complete.
    8. -
    -

    Enjoy the modded features of Candy Crush Saga

    -

    The final step to download and install mod candy crush saga is to enjoy the modded features of Candy Crush Saga. To do this, you can follow these steps:

    -
      -
    1. Open the app and sign in with your Facebook or King.com account if you have one. This will sync your progress and achievements with the original app.
    2. -
    3. Start playing the game and notice the differences from the original app. You should have unlimited lives, boosters, moves, and more. You can also access all the levels and game modes without any restrictions.
    4. -
    5. Have fun and enjoy the game!
    6. -
    -

    Conclusion

    -

    Candy Crush Saga is a fun and addictive match 3 puzzle game that has millions of fans around the world. However, if you want to experience more features and benefits from the game, you can try downloading and installing a modded version of Candy Crush Saga that gives you unlimited resources and access. In this article, we showed you how to download and install mod candy crush saga on your Android device in four easy steps. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below.

    -

    FAQs

    -
      -
    • Is it safe to use a mod apk?
    • -

      A: It depends on the source and quality of the mod apk file. Some mod apk files may contain malware, viruses, or spyware that can harm your device or steal your data. Some mod apk files may also violate the terms of service or policies of the original app or platform, which can result in your account being banned or suspended. Therefore, you should always be careful and cautious when using a mod apk file. You should also backup your data and use antivirus or security apps to protect your device.

      -
    • Will I lose my progress or achievements if I use a mod apk?
    • -

      A: No, you will not lose your progress or achievements if you use a mod apk file for Candy Crush Saga. You can sign in with your Facebook or King.com account to sync your progress and achievements with the original app. However, you should be aware that using a mod apk may affect your gameplay experience or cause some glitches or errors in the game.

      -
    • Can I switch back to the original app if I don't like the mod apk?
    • -

      A: Yes, you can switch back to the original app if you don't like the mod apk file for Candy Crush Saga. You can uninstall the mod apk file from your device and download the original app from the Google Play Store. However, you should note that some of your data or settings may be lost or changed when you switch back to the original app.

      -
    • Can I update the mod apk file when a new version is released?
    • -

      A: It depends on the availability and compatibility of the mod apk file. Some mod apk files may be updated automatically or manually by the modder or the website. Some mod apk files may not be updated or may not work with the latest version of the original app. Therefore, you should always check the compatibility and availability of the mod apk file before updating it.

      -
    • Where can I find more mod apk files for other apps and games?
    • -

      A: There are many websites that offer mod apk files for various apps and games, but not all of them are trustworthy or safe. Some of the websites that are popular and reliable for mod apk files are:


      However, you should always do some research and check the reviews and ratings of the website before downloading any mod apk file. You should also use some antivirus or security apps to scan the mod apk file before installing it.
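
      As a rough illustration of that last tip, the sketch below computes the SHA-256 checksum of a downloaded file so you can compare it against whatever hash the download page publishes. The file name and the expected hash here are placeholders, not values tied to any real mod apk.

```python
import hashlib

def sha256_of(path: str) -> str:
    # Read the file in 1 MB chunks so large APKs do not have to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values - substitute the real file name and the hash the site lists.
apk_path = "candy-crush-mod.apk"
published_hash = "0" * 64

actual = sha256_of(apk_path)
print(actual)
if actual != published_hash:
    print("Checksum mismatch - do not install this file.")
```

      If the checksums do not match, or the site does not publish one at all, treat the file with extra suspicion.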

      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Dragon Era M APK A Turn-Based Pet MMORPG with Seven Major Civilizations to Discover.md b/spaces/fatiXbelha/sd/Dragon Era M APK A Turn-Based Pet MMORPG with Seven Major Civilizations to Discover.md deleted file mode 100644 index 3ca289a19c7b6fa631b8ddc86ef01b96c1db6b06..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Dragon Era M APK A Turn-Based Pet MMORPG with Seven Major Civilizations to Discover.md +++ /dev/null @@ -1,124 +0,0 @@ - -

      Dragon Era M APK: A Turn-Based Pet MMORPG Mobile Game


      Do you love pets and fantasy adventures? If yes, then you should try Dragon Era M APK, a turn-based pet MMORPG mobile game that will take you to a wonderful world of magic and mystery. In this game, you can collect, fight, and ride thousands of pets, from cute and playful ones to fierce and powerful ones. You can also explore different civilizations, enjoy social interactions, and experience realistic weather and graphics. In this article, we will tell you everything you need to know about Dragon Era M APK, including its features, how to download and install it, and how to play it.


      What is Dragon Era M APK?


      Dragon Era M APK is an Android game developed by Archosaur Games, the same team behind Dragon Raja SEA. It is the Thai version of Dream of a New World, a popular Chinese game that has millions of players. Dragon Era M APK uses the UE4 technology to create a high-quality 3D world that you can freely explore. It also has a turn-based combat system that requires strategy and skill. You can choose from four different character classes: Warrior, Mage, Assassin, and Priest. Each class has its own strengths and weaknesses, as well as unique skills and abilities.


      dragon era m apk


      Download File: https://urllie.com/2uNB7s




      Features of Dragon Era M APK


      Dragon Era M APK has many features that make it an exciting and enjoyable game. Here are some of them:


      Thousands of pets to collect, fight, and ride


      One of the main attractions of Dragon Era M APK is the pet system. You can collect over 1,000 different pets, from dinosaurs to fairies. You can also fight with them in battles, train them, customize them, and even ride them. There are three types of rideable pets: land, sea, and air. You can travel the world with your pets and experience different terrains and environments.


      dragon era m apk download
      -dragon era m apk mod
      -dragon era m apk latest version
      -dragon era m apk free download
      -dragon era m apk unlimited money
      -dragon era m apk android
      -dragon era m apk obb
      -dragon era m apk offline
      -dragon era m apk 1.4.6
      -dragon era m apk hack
      -dragon era m game apk
      -dragon era m mod apk 2023
      -dragon era m mod apk unlimited gems
      -dragon era m mod apk android 1
      -dragon era m mod apk revdl
      -dragon era m mod apk rexdl
      -dragon era m mod apk happymod
      -dragon era m mod apk an1
      -dragon era m mod apk no root
      -dragon era m mod apk online
      -dragon era m thai version apk
      -dragon era m english version apk
      -dragon era m global version apk
      -dragon era m chinese version apk
      -dragon era m korean version apk
      -dragon era m sea version apk
      -dragon era m beta version apk
      -dragon era m full version apk
      -dragon era m new version apk
      -dragon era m old version apk
      -how to download dragon era m apk
      -how to install dragon era m apk
      -how to update dragon era m apk
      -how to play dragon era m apk
      -how to hack dragon era m apk
      -how to get unlimited money in dragon era m apk
      -how to get free gems in dragon era m apk
      -how to get pets in dragon era m apk
      -how to change language in dragon era m apk
      -how to join discord in dragon era m apk


      Dynamic weather and 360-degree view


      Another feature of Dragon Era M APK is the realistic weather system. The game has a panoramic view that allows you to see the changes in the weather, such as rain, snow, fog, thunderstorm, etc. The weather also affects the gameplay and strategy. For example, some pets may perform better or worse depending on the weather conditions. You can also rotate the camera 360 degrees to see every detail of the scene.


      Social interactions and hot springs


      Dragon Era M APK is not only about fighting and exploring. It is also about making friends and having fun. You can join a family, which is similar to a guild or clan in other games. You can chat with your family members, help each other out, and participate in family events. You can also enjoy some relaxing activities with your friends, such as soaking in hot springs under the starry sky. You can hug, rub the back, or even kiss your friends in the hot springs.


      Seven civilizations and realistic CG


      Dragon Era M APK has a rich and diverse world that consists of seven civilizations: Atlantis Empire (underwater), Conch Bay (island), Eldorado (desert), Mahaya Tribe (forest), Snowy Kingdom (snowy), Steam City (industrial), and Wonderland (fantasy). Each civilization has its own culture, history, architecture, landscape, etc. You can explore each civilization and discover its secrets and mysteries. The game also has realistic CG animations that show the story of each civilization.


      UE4 technology and stunning graphics


      Dragon Era M APK uses the UE4 technology, which is a powerful and advanced game engine developed by Epic Games. UE4 provides a complete suite of game development tools that enable high-quality 3D graphics, realistic physics, dynamic lighting, and immersive sound. Dragon Era M APK showcases the capabilities of UE4 with its stunning graphics and smooth animations. The game has a detailed and diverse world that you can explore with your pets. You can see the reflections of the water, the shadows of the trees, the movements of the clouds, and the effects of the weather. The game also has a cinematic mode that allows you to capture and share your best moments in the game.


      How to download and install Dragon Era M APK?


      If you are interested in playing Dragon Era M APK, you need to download and install it on your Android device. Here are the requirements and steps to do so:


      Requirements and compatibility


      To play Dragon Era M APK, you need to have an Android device that meets the following requirements:

      • Android version: 5.0 or higher
      • RAM: 2 GB or more
      • Storage: 5 GB or more
      • Internet connection: stable and fast
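
      If you want to check these requirements from a computer rather than digging through the device settings, the sketch below reads them over adb. It assumes the Android platform tools (adb) are installed and the device is connected with USB debugging enabled; on very old devices some of these shell commands may behave differently.

```python
import subprocess

def adb_shell(*args: str) -> str:
    # Run a shell command on the connected device and return its output.
    result = subprocess.run(
        ["adb", "shell", *args], check=True, capture_output=True, text=True
    )
    return result.stdout.strip()

print("Android version:", adb_shell("getprop", "ro.build.version.release"))
print("Memory:", adb_shell("cat", "/proc/meminfo").splitlines()[0])  # MemTotal line
print("Free storage on /data:")
print(adb_shell("df", "-h", "/data"))
```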

      The game is compatible with most Android devices, but some models may experience problems or errors. If you encounter any issues, you can contact the customer service of the game or check the official website for more information.


      Steps to download and install


      To download and install Dragon Era M APK, you need to follow these steps:

      1. Go to the official website of the game or any trusted third-party source that provides the APK file of the game.
      2. Download the APK file to your device. You may need to enable the installation of unknown sources in your device settings.
      3. Locate the APK file in your device storage and tap on it to start the installation process (an alternative sideload route from a computer is sketched after this list).
      4. Follow the instructions on the screen and wait for the installation to finish.
      5. Launch the game and enjoy!
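
      As mentioned in step 3, you can also sideload the file from a computer instead of tapping it on the phone. The sketch below is one minimal way to do that; it assumes adb from the Android platform tools is installed, USB debugging is enabled on the device, and the APK file name is a placeholder for whatever you actually downloaded.

```python
import subprocess

# Placeholder name for the APK downloaded in step 2.
apk_path = "dragon-era-m.apk"

# List connected devices first to confirm the phone is visible and authorized.
subprocess.run(["adb", "devices"], check=True)

# Install the APK; "-r" keeps existing app data if an older version is present.
subprocess.run(["adb", "install", "-r", apk_path], check=True)
```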

      How to play Dragon Era M APK?


      Now that you have downloaded and installed Dragon Era M APK, you are ready to play it. Here are some tips on how to play the game:


      Choose your character and pet


      The first thing you need to do is to create your character. You can choose from four classes: Warrior, Mage, Assassin, or Priest. Each class has its own advantages and disadvantages, as well as different skills and abilities. You can also customize your character's appearance, such as hair, eyes, skin, clothes, etc. After creating your character, you need to choose your first pet. You can pick from three types: land, sea, or air. Each type has its own characteristics and attributes. You can also name your pet and change its appearance later.


      Explore the world and complete quests


      The next thing you need to do is to explore the world of Dragon Era M APK. You can travel to different civilizations and regions, such as Atlantis Empire, Conch Bay, Eldorado, Mahaya Tribe, Snowy Kingdom, Steam City, and Wonderland. Each place has its own story, culture, landscape, and secrets. You can also complete various quests that will help you progress in the game. Some quests are main quests that are related to the main storyline of the game. Others are side quests that are optional but rewarding. You can also find hidden quests that are triggered by certain actions or events.


      Join a family and make friends


      The third thing you need to do is to join a family and make friends in Dragon Era M APK. A family is similar to a guild or clan in other games. You can join an existing family or create your own family with other players. By joining a family, you can chat with your family members, help each other out, participate in family events, and get family rewards. You can also make friends with other players in the game. You can chat with them, send them gifts, invite them to join your team, or visit their homes. You can also enjoy some relaxing activities with them, such as soaking in hot springs.


      Participate in events and battles


      The fourth thing you need to do is to participate in events and battles in Dragon Era M APK. There are many events and activities that you can join in the game, such as festivals, treasure hunts, fishing contests, etc. These events are fun and rewarding, and they also help you improve your skills and reputation. You can also participate in battles with your pets, either solo or with a team. There are different modes of battles, such as arena, dungeon, raid, world boss, etc. These battles are challenging and strategic, and they also help you earn rewards and rankings.


      Conclusion


      Dragon Era M APK is a turn-based pet MMORPG mobile game that will take you to a wonderful world of magic and mystery. You can collect, fight, and ride thousands of pets, explore different civilizations, enjoy social interactions, and experience realistic weather and graphics. You can also download and install the game easily on your Android device, and play it with your friends or family. Dragon Era M APK is a game that will keep you entertained and engaged for hours. If you are looking for a new and exciting game to play, you should give Dragon Era M APK a try!


      FAQs


      Here are some frequently asked questions about Dragon Era M APK:

      • Q: Is Dragon Era M APK free to play?
      • A: Yes, Dragon Era M APK is free to play. However, there are some optional in-game purchases that you can make with real money.
      • Q: Is Dragon Era M APK available in other languages?
      • A: Currently, Dragon Era M APK is only available in Thai. However, the developers may add more languages in the future.
      • Q: How can I contact the customer service of Dragon Era M APK?
      • A: You can contact the customer service of Dragon Era M APK by using the following methods:
        • Email: support@archosaur.com
        • Facebook: https://www.facebook.com/DragonEraM/
        • Line: @dragoneram
      • Q: How can I update Dragon Era M APK?
      • A: You can update Dragon Era M APK by following these steps:
        1. Go to the official website of the game or any trusted third-party source that provides the latest version of the APK file of the game.
        2. Download the APK file to your device.
        3. Locate the APK file in your device storage and tap on it to start the installation process.
        4. Follow the instructions on the screen and wait for the installation to finish.
        5. Launch the game and enjoy!
      • Q: How can I get more pets in Dragon Era M APK?
      • A: You can get more pets in Dragon Era M APK by using these methods:
        • Capture them in the wild with pet balls.
        • Hatch them from eggs that you can get from quests or events.
        • Breed them with other pets that you have or borrow from your friends.
        • Exchange them with other players or buy them from the market.

      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Enjoy Wormate.io with MOD APK Free Money and Gems for Android.md b/spaces/fatiXbelha/sd/Enjoy Wormate.io with MOD APK Free Money and Gems for Android.md deleted file mode 100644 index d0b66f80549f668c1eab97de56533ee72a7f3a81..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Enjoy Wormate.io with MOD APK Free Money and Gems for Android.md +++ /dev/null @@ -1,100 +0,0 @@ - -

      Wormate.io APK Hile: How to Get Unlimited Money and Gems in Wormate.io


      Do you love playing worm games online? Do you want to become the biggest and longest worm in the arena? Do you want to have unlimited resources and access to all the features in the game? If you answered yes to any of these questions, then you need to try Wormate.io APK Hile. This is a modified version of the popular worm game Wormate.io that gives you unlimited money and gems, all skins and cards unlocked, and an ads-free interface. In this article, we will tell you what Wormate.io is, what Wormate.io APK Hile is, what benefits it offers, and how to download and install it on your android device. Let's get started!


      Introduction


      What is Wormate.io?


      Wormate.io is a fun and addictive online multiplayer game where you control a worm and try to eat as much candy, cakes, cookies, and other sweets as possible. The more you eat, the bigger and longer you grow. But be careful, there are other worms in the arena who want to eat you. You can use your speed boost to escape or attack them, but it will cost you some of your length. You can also collect power-ups and cards that give you special abilities and bonuses. The game has colorful graphics, smooth gameplay, and a variety of skins and themes to choose from. You can play with your friends or with players from all over the world.


      wormate.io apk hile


      Download Zip: https://urllie.com/2uNE1m




      What is Wormate.io APK Hile?


      Wormate.io APK Hile is a modified version of Wormate.io that gives you unlimited money and gems, all skins and cards unlocked, and an ads-free interface. This means that you can buy anything you want in the game without worrying about running out of resources. You can also customize your worm with any skin or theme you like, and use any card or power-up you want. You can also enjoy the game without any annoying ads or pop-ups. Wormate.io APK Hile is not available on the Google Play Store, but you can download it from a trusted third-party website for free.


      Benefits of Wormate.io APK Hile


      Unlimited Money and Gems


      Money and gems are the main currencies in Wormate.io. You can use them to buy skins, themes, cards, power-ups, and other items in the game. You can also use them to upgrade your worm's abilities and stats. However, earning money and gems in the game can be slow and tedious. You have to play for a long time, complete missions, watch ads, or spend real money to get them. But with Wormate.io APK Hile, you don't have to worry about that. You will get unlimited money and gems as soon as you start the game. You can spend them as much as you want without running out.


      All Skins and Cards Unlocked


      Skins and cards are the main ways to customize your worm in Wormate.io. Skins change the appearance of your worm, while cards give you special abilities and bonuses. There are hundreds of skins and cards in the game, each with different effects and prices. Some of them are rare and hard to get. You have to play for a long time, complete missions, or spend money and gems to unlock them. But with Wormate.io APK Hile, you don't have to do that. You will get all skins and cards unlocked as soon as you start the game. You can choose any skin or card you like without any restrictions.

      Ads-Free Interface

      Ads are one of the most annoying things in any online game. They interrupt your gameplay, slow down your device, and sometimes even contain malware or viruses. They also consume your data and battery. Wormate.io is no exception. The game has a lot of ads that pop up every few minutes or after every game. You have to watch them or pay money to remove them. But with Wormate.io APK Hile, you don't have to deal with that. You will get an ads-free interface as soon as you start the game. You can enjoy the game without any distractions or risks.


      wormate.io mod apk unlimited money and gems
      -wormate.io hack apk download for android
      -wormate.io apk hile nasıl yapılır
      -wormate.io mod menu apk 2023
      -wormate.io apk hile indir son sürüm
      -wormate.io mod apk no ads
      -wormate.io hack apk latest version
      -wormate.io apk hile yapma programı
      -wormate.io mod apk all skins unlocked
      -wormate.io apk hile linki
      -wormate.io mod apk free download
      -wormate.io hack apk 2023
      -wormate.io apk hile nasıl indirilir
      -wormate.io mod apk unlimited coins and diamonds
      -wormate.io apk hile kurulumu
      -wormate.io mod apk revdl
      -wormate.io hack apk ios
      -wormate.io apk hile güncel
      -wormate.io mod apk offline
      -wormate.io apk hile oyun indir club
      -wormate.io mod apk rexdl
      -wormate.io hack apk pc
      -wormate.io apk hile 2023
      -wormate.io mod apk happymod
      -wormate.io hack apk online
      -wormate.io apk hile video
      -wormate.io mod apk android 1
      -wormate.io hack apk unlimited money and gems download
      -wormate.io apk hile yükleme
      -wormate.io mod apk an1
      -wormate.io hack apk mod menu
      -wormate.io apk hile nasıl yüklenir
      -wormate.io mod apk unlimited everything
      -wormate.io hack apk mediafıre
      -wormate.io apk hile oyna
      -wormate.io mod apk god mode
      -wormate.io hack apk 4.0.15
      -wormate.io apk hile türkçe
      -wormate.io mod apk unlimited lives and boosters
      -wormate.io hack apk pure
      -wormate.io apk hile yeni sürüm
      -wormate.io mod apk vip unlocked
      -wormate.io hack apk no root
      -wormate.io apk hile nasıl kurulur
      -wormate.io mod apk unlimited speed and zoom
      -wormate.io hack apk mega.nz
      -wormate.io apk hile indir 2023
      -wormate.io mod apk unlimited health and energy
      -wormate.io hack apk uptodown


      How to Download and Install Wormate.io APK Hile


      Downloading and installing Wormate.io APK Hile is very easy and fast. You just need to follow these simple steps:


      Step 1: Enable Unknown Sources


      Since Wormate.io APK Hile is not available on the Google Play Store, you need to enable unknown sources on your device. This will allow you to install apps from third-party websites. To do this, go to your device's settings, then security, then unknown sources, and turn it on. You may get a warning message, but don't worry, it's safe.


      Step 2: Download the APK File


      Next, you need to download the APK file of Wormate.io APK Hile from a trusted website. You can use this link to download it for free. The file size is about 20 MB, so it won't take long to download. Make sure you have enough space on your device before downloading.
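
      If you would rather grab the file on a computer first and move it to the phone afterwards, the sketch below downloads it and prints the size so you can sanity-check it against the roughly 20 MB mentioned above. The URL is a placeholder, since the real link depends on the site you download from, and the requests package must be installed.

```python
import os
import requests

# Placeholder URL - replace it with the actual download link you trust.
url = "https://example.com/wormate-io-apk-hile.apk"
dest = "wormate-io-apk-hile.apk"

with requests.get(url, stream=True, timeout=60) as resp:
    resp.raise_for_status()
    with open(dest, "wb") as out:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            out.write(chunk)

size_mb = os.path.getsize(dest) / (1024 * 1024)
print(f"Saved {dest} ({size_mb:.1f} MB)")  # expect roughly 20 MB per the text above
```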


      Step 3: Install the APK File


      Once you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your downloads folder or wherever you saved it, and tap on it. You may get a confirmation message, just tap on install and wait for a few seconds.


      Step 4: Launch the Game and Enjoy


      Finally, you can launch the game and enjoy all the benefits of Wormate.io APK Hile. You will see that you have unlimited money and gems, all skins and cards unlocked, and an ads-free interface. You can start playing right away and become the biggest and longest worm in the arena.


      Conclusion


      Wormate.io is a fun and addictive online multiplayer game where you control a worm and try to eat as much candy, cakes, cookies, and other sweets as possible. The more you eat, the bigger and longer you grow. But be careful, there are other worms in the arena who want to eat you. You can use your speed boost to escape or attack them, but it will cost you some of your length. You can also collect power-ups and cards that give you special abilities and bonuses.


      If you want to have more fun and advantages in the game, you should try Wormate.io APK Hile. This is a modified version of Wormate.io that gives you unlimited money and gems, all skins and cards unlocked, and an ads-free interface. This means that you can buy anything you want in the game without worrying about running out of resources. You can also customize your worm with any skin or theme you like, and use any card or power-up you want. You can also enjoy the game without any annoying ads or pop-ups.


      To download and install Wormate.io APK Hile on your Android device, you just need to follow these simple steps:

      • Enable unknown sources on your device.
      • Download the APK file from a trusted website.
      • Install the APK file on your device.
      • Launch the game and enjoy.

      We hope this article was helpful for you. If you have any questions or feedback, please let us know in the comments section below. Thank you for reading!


      FAQs


      Here are some frequently asked questions about Wormate.io APK Hile:


      Is Wormate.io APK Hile safe?


      Yes, Wormate.io APK Hile is safe to use. It does not contain any malware or viruses that can harm your device or data. However, you should always download it from a trusted website and scan it with an antivirus before installing it.


      Is Wormate.io APK Hile legal?


      No, Wormate.io APK Hile is not legal. It violates the terms and conditions of the original game developer and may result in a ban or suspension of your account. Use it at your own risk.



      Can I play Wormate.io APK Hile with my friends?


      Yes, you can play Wormate.io APK Hile with your friends. You can either join the same server or create a private room and invite them. You can also chat with them and share your scores and achievements.


      Can I update Wormate.io APK Hile?


      No, you cannot update Wormate.io APK Hile. If you try to update it from the Google Play Store, it will overwrite the modified version and you will lose all the benefits. If you want to get the latest version of Wormate.io APK Hile, you have to download it again from a trusted website and install it on your device.


      Can I use Wormate.io APK Hile on other devices?


      Yes, you can use Wormate.io APK Hile on other devices. You just need to download the APK file from a trusted website and transfer it to your other device using a USB cable, Bluetooth, or Wi-Fi. Then, you can install it and play it as usual.

      \ No newline at end of file diff --git a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/latex_utils.py b/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/latex_utils.py deleted file mode 100644 index eb65a8a915d2cbc66a346e42a5f2a17ee07bb585..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/crazy_functions/latex_utils.py +++ /dev/null @@ -1,788 +0,0 @@ -from toolbox import update_ui, update_ui_lastest_msg # 刷新Gradio前端界面 -from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone -import os, shutil -import re -import numpy as np -pj = os.path.join - -""" -======================================================================== -Part One -Latex segmentation with a binary mask (PRESERVE=0, TRANSFORM=1) -======================================================================== -""" -PRESERVE = 0 -TRANSFORM = 1 - -def set_forbidden_text(text, mask, pattern, flags=0): - """ - Add a preserve text area in this paper - e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}" - you can mask out (mask = PRESERVE so that text become untouchable for GPT) - everything between "\begin{equation}" and "\end{equation}" - """ - if isinstance(pattern, list): pattern = '|'.join(pattern) - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - mask[res.span()[0]:res.span()[1]] = PRESERVE - return text, mask - -def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True): - """ - Move area out of preserve area (make text editable for GPT) - count the number of the braces so as to catch compelete text area. - e.g. - \begin{abstract} blablablablablabla. \end{abstract} - """ - if isinstance(pattern, list): pattern = '|'.join(pattern) - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - if not forbid_wrapper: - mask[res.span()[0]:res.span()[1]] = TRANSFORM - else: - mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # '\\begin{abstract}' - mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # abstract - mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # abstract - return text, mask - -def set_forbidden_text_careful_brace(text, mask, pattern, flags=0): - """ - Add a preserve text area in this paper (text become untouchable for GPT). - count the number of the braces so as to catch compelete text area. - e.g. - \caption{blablablablabla\texbf{blablabla}blablabla.} - """ - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - brace_level = -1 - p = begin = end = res.regs[0][0] - for _ in range(1024*16): - if text[p] == '}' and brace_level == 0: break - elif text[p] == '}': brace_level -= 1 - elif text[p] == '{': brace_level += 1 - p += 1 - end = p+1 - mask[begin:end] = PRESERVE - return text, mask - -def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True): - """ - Move area out of preserve area (make text editable for GPT) - count the number of the braces so as to catch compelete text area. - e.g. 
- \caption{blablablablabla\texbf{blablabla}blablabla.} - """ - pattern_compile = re.compile(pattern, flags) - for res in pattern_compile.finditer(text): - brace_level = 0 - p = begin = end = res.regs[1][0] - for _ in range(1024*16): - if text[p] == '}' and brace_level == 0: break - elif text[p] == '}': brace_level -= 1 - elif text[p] == '{': brace_level += 1 - p += 1 - end = p - mask[begin:end] = TRANSFORM - if forbid_wrapper: - mask[res.regs[0][0]:begin] = PRESERVE - mask[end:res.regs[0][1]] = PRESERVE - return text, mask - -def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42): - """ - Find all \begin{} ... \end{} text block that with less than limit_n_lines lines. - Add it to preserve area - """ - pattern_compile = re.compile(pattern, flags) - def search_with_line_limit(text, mask): - for res in pattern_compile.finditer(text): - cmd = res.group(1) # begin{what} - this = res.group(2) # content between begin and end - this_mask = mask[res.regs[2][0]:res.regs[2][1]] - white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof', - 'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate'] - if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42 - this, this_mask = search_with_line_limit(this, this_mask) - mask[res.regs[2][0]:res.regs[2][1]] = this_mask - else: - mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE - return text, mask - return search_with_line_limit(text, mask) - -class LinkedListNode(): - """ - Linked List Node - """ - def __init__(self, string, preserve=True) -> None: - self.string = string - self.preserve = preserve - self.next = None - # self.begin_line = 0 - # self.begin_char = 0 - -def convert_to_linklist(text, mask): - root = LinkedListNode("", preserve=True) - current_node = root - for c, m, i in zip(text, mask, range(len(text))): - if (m==PRESERVE and current_node.preserve) \ - or (m==TRANSFORM and not current_node.preserve): - # add - current_node.string += c - else: - current_node.next = LinkedListNode(c, preserve=(m==PRESERVE)) - current_node = current_node.next - return root -""" -======================================================================== -Latex Merge File -======================================================================== -""" - -def 寻找Latex主文件(file_manifest, mode): - """ - 在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。 - P.S. 
但愿没人把latex模板放在里面传进来 (6.25 加入判定latex模板的代码) - """ - canidates = [] - for texf in file_manifest: - if os.path.basename(texf).startswith('merge'): - continue - with open(texf, 'r', encoding='utf8') as f: - file_content = f.read() - if r'\documentclass' in file_content: - canidates.append(texf) - else: - continue - - if len(canidates) == 0: - raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)') - elif len(canidates) == 1: - return canidates[0] - else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回 - canidates_score = [] - # 给出一些判定模板文档的词作为扣分项 - unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers'] - expected_words = ['\input', '\ref', '\cite'] - for texf in canidates: - canidates_score.append(0) - with open(texf, 'r', encoding='utf8') as f: - file_content = f.read() - for uw in unexpected_words: - if uw in file_content: - canidates_score[-1] -= 1 - for uw in expected_words: - if uw in file_content: - canidates_score[-1] += 1 - select = np.argmax(canidates_score) # 取评分最高者返回 - return canidates[select] - -def rm_comments(main_file): - new_file_remove_comment_lines = [] - for l in main_file.splitlines(): - # 删除整行的空注释 - if l.lstrip().startswith("%"): - pass - else: - new_file_remove_comment_lines.append(l) - main_file = '\n'.join(new_file_remove_comment_lines) - # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令 - main_file = re.sub(r'(? 0 and node_string.count('\_') > final_tex.count('\_'): - # walk and replace any _ without \ - final_tex = re.sub(r"(?') - if not node.preserve: - segment_parts_for_gpt.append(node.string) - f.write(f'

      #{show_html}#

      ') - else: - f.write(f'

      {show_html}

      ') - node = node.next - if node is None: break - - for n in nodes: n.next = None # break - return_dict['nodes'] = nodes - return_dict['segment_parts_for_gpt'] = segment_parts_for_gpt - return return_dict - - - -class LatexPaperSplit(): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - """ - def __init__(self) -> None: - self.nodes = None - self.msg = "*{\\scriptsize\\textbf{警告:该PDF由GPT-Academic开源项目调用大语言模型+Latex翻译插件一键生成," + \ - "版权归原文作者所有。翻译内容可靠性无保障,请仔细鉴别并以原文为准。" + \ - "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。" - # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加REAME中的QQ联系开发者) - self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\" - - def merge_result(self, arr, mode, msg): - """ - Merge the result after the GPT process completed - """ - result_string = "" - p = 0 - for node in self.nodes: - if node.preserve: - result_string += node.string - else: - result_string += fix_content(arr[p], node.string) - p += 1 - if mode == 'translate_zh': - pattern = re.compile(r'\\begin\{abstract\}.*\n') - match = pattern.search(result_string) - if not match: - # match \abstract{xxxx} - pattern_compile = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL) - match = pattern_compile.search(result_string) - position = match.regs[1][0] - else: - # match \begin{abstract}xxxx\end{abstract} - position = match.end() - result_string = result_string[:position] + self.msg + msg + self.msg_declare + result_string[position:] - return result_string - - def split(self, txt, project_folder, opts): - """ - break down latex file to a linked list, - each node use a preserve flag to indicate whether it should - be proccessed by GPT. - P.S. use multiprocessing to avoid timeout error - """ - import multiprocessing - manager = multiprocessing.Manager() - return_dict = manager.dict() - p = multiprocessing.Process( - target=split_subprocess, - args=(txt, project_folder, return_dict, opts)) - p.start() - p.join() - p.close() - self.nodes = return_dict['nodes'] - self.sp = return_dict['segment_parts_for_gpt'] - return self.sp - - - -class LatexPaperFileGroup(): - """ - use tokenizer to break down text according to max_token_limit - """ - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - use tokenizer to break down text according to max_token_limit - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - print('Segmentation: done') - - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - 
self.file_result[k] += r - - def write_result(self): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - with open(path + '.polish.tex', 'w', encoding='utf8') as f: - manifest.append(path + '.polish.tex') - f.write(res) - return manifest - -def write_html(sp_file_contents, sp_file_result, chatbot, project_folder): - - # write html - try: - import shutil - from .crazy_utils import construct_html - from toolbox import gen_time_str - ch = construct_html() - orig = "" - trans = "" - final = [] - for c,r in zip(sp_file_contents, sp_file_result): - final.append(c) - final.append(r) - for i, k in enumerate(final): - if i%2==0: - orig = k - if i%2==1: - trans = k - ch.add_row(a=orig, b=trans) - create_report_file_name = f"{gen_time_str()}.trans.html" - ch.save_file(create_report_file_name) - shutil.copyfile(pj('./gpt_log/', create_report_file_name), pj(project_folder, create_report_file_name)) - promote_file_to_downloadzone(file=f'./gpt_log/{create_report_file_name}', chatbot=chatbot) - except: - from toolbox import trimmed_format_exc - print('writing html result failed:', trimmed_format_exc()) - -def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, mode='proofread', switch_prompt=None, opts=[]): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - from .latex_utils import LatexPaperFileGroup, merge_tex_files, LatexPaperSplit, 寻找Latex主文件 - - # <-------- 寻找主tex文件 ----------> - maintex = 寻找Latex主文件(file_manifest, mode) - chatbot.append((f"定位主Latex文件", f'[Local Message] 分析结果:该项目的Latex主文件是{maintex}, 如果分析错误, 请立即终止程序, 删除或修改歧义文件, 然后重试。主程序即将开始, 请稍候。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - time.sleep(3) - - # <-------- 读取Latex文件, 将多文件tex工程融合为一个巨型tex ----------> - main_tex_basename = os.path.basename(maintex) - assert main_tex_basename.endswith('.tex') - main_tex_basename_bare = main_tex_basename[:-4] - may_exist_bbl = pj(project_folder, f'{main_tex_basename_bare}.bbl') - if os.path.exists(may_exist_bbl): - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_{mode}.bbl')) - shutil.copyfile(may_exist_bbl, pj(project_folder, f'merge_diff.bbl')) - - with open(maintex, 'r', encoding='utf-8', errors='replace') as f: - content = f.read() - merged_content = merge_tex_files(project_folder, content, mode) - - with open(project_folder + '/merge.tex', 'w', encoding='utf-8', errors='replace') as f: - f.write(merged_content) - - # <-------- 精细切分latex文件 ----------> - chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - lps = LatexPaperSplit() - res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数 - - # <-------- 拆分过长的latex片段 ----------> - pfg = LatexPaperFileGroup() - for index, r in enumerate(res): - pfg.file_paths.append('segment-' + str(index)) - pfg.file_contents.append(r) - - pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - # <-------- 根据需要切换prompt ----------> - inputs_array, sys_prompt_array = switch_prompt(pfg, mode) - inputs_show_user_array = [f"{mode} {f}" for f in pfg.sp_file_tag] - - if os.path.exists(pj(project_folder,'temp.pkl')): - - # <-------- 【仅调试】如果存在调试缓存文件,则跳过GPT请求环节 ----------> - pfg = objload(file=pj(project_folder,'temp.pkl')) - - else: - # <-------- gpt 多线程请求 ----------> - gpt_response_collection = 
yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待 - scroller_max_len = 40 - ) - - # <-------- 文本碎片重组为完整的tex片段 ----------> - pfg.sp_file_result = [] - for i_say, gpt_say, orig_content in zip(gpt_response_collection[0::2], gpt_response_collection[1::2], pfg.sp_file_contents): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - - # <-------- 临时存储用于调试 ----------> - pfg.get_token_num = None - objdump(pfg, file=pj(project_folder,'temp.pkl')) - - write_html(pfg.sp_file_contents, pfg.sp_file_result, chatbot=chatbot, project_folder=project_folder) - - # <-------- 写出文件 ----------> - msg = f"当前大语言模型: {llm_kwargs['llm_model']},当前语言模型温度设定: {llm_kwargs['temperature']}。" - final_tex = lps.merge_result(pfg.file_result, mode, msg) - with open(project_folder + f'/merge_{mode}.tex', 'w', encoding='utf-8', errors='replace') as f: - if mode != 'translate_zh' or "binary" in final_tex: f.write(final_tex) - - - # <-------- 整理结果, 退出 ----------> - chatbot.append((f"完成了吗?", 'GPT结果已输出, 正在编译PDF')) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # <-------- 返回 ----------> - return project_folder + f'/merge_{mode}.tex' - - - -def remove_buggy_lines(file_path, log_path, tex_name, tex_name_pure, n_fix, work_folder_modified): - try: - with open(log_path, 'r', encoding='utf-8', errors='replace') as f: - log = f.read() - with open(file_path, 'r', encoding='utf-8', errors='replace') as f: - file_lines = f.readlines() - import re - buggy_lines = re.findall(tex_name+':([0-9]{1,5}):', log) - buggy_lines = [int(l) for l in buggy_lines] - buggy_lines = sorted(buggy_lines) - print("removing lines that has errors", buggy_lines) - file_lines.pop(buggy_lines[0]-1) - with open(pj(work_folder_modified, f"{tex_name_pure}_fix_{n_fix}.tex"), 'w', encoding='utf-8', errors='replace') as f: - f.writelines(file_lines) - return True, f"{tex_name_pure}_fix_{n_fix}", buggy_lines - except: - print("Fatal error occurred, but we cannot identify error, please download zip, read latex log, and compile manually.") - return False, -1, [-1] - -def compile_latex_with_timeout(command, cwd, timeout=60): - import subprocess - process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) - try: - stdout, stderr = process.communicate(timeout=timeout) - except subprocess.TimeoutExpired: - process.kill() - stdout, stderr = process.communicate() - print("Process timed out!") - return False - return True - -def 编译Latex(chatbot, history, main_file_original, main_file_modified, work_folder_original, work_folder_modified, work_folder, mode='default'): - import os, time - current_dir = os.getcwd() - n_fix = 1 - max_try = 32 - chatbot.append([f"正在编译PDF文档", f'编译已经开始。当前工作路径为{work_folder},如果程序停顿5分钟以上,请直接去该路径下取回翻译结果,或者重启之后再度尝试 ...']); yield from update_ui(chatbot=chatbot, history=history) - chatbot.append([f"正在编译PDF文档", '...']); yield from update_ui(chatbot=chatbot, history=history); time.sleep(1); chatbot[-1] = list(chatbot[-1]) # 刷新界面 - yield from update_ui_lastest_msg('编译已经开始...', chatbot, history) # 刷新Gradio前端界面 - - while True: - import os - - # https://stackoverflow.com/questions/738755/dont-make-me-manually-abort-a-latex-compile-when-theres-an-error - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 
编译原始PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译转化后的PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if ok and os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')): - # 只有第二步成功,才能继续下面的步骤 - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译BibTex ...', chatbot, history) # 刷新Gradio前端界面 - if not os.path.exists(pj(work_folder_original, f'{main_file_original}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_original}.aux', work_folder_original) - if not os.path.exists(pj(work_folder_modified, f'{main_file_modified}.bbl')): - ok = compile_latex_with_timeout(f'bibtex {main_file_modified}.aux', work_folder_modified) - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 编译文献交叉引用 ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_original}.tex', work_folder_original) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error {main_file_modified}.tex', work_folder_modified) - - if mode!='translate_zh': - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 使用latexdiff生成论文转化前后对比 ...', chatbot, history) # 刷新Gradio前端界面 - print( f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - ok = compile_latex_with_timeout(f'latexdiff --encoding=utf8 --append-safecmd=subfile {work_folder_original}/{main_file_original}.tex {work_folder_modified}/{main_file_modified}.tex --flatten > {work_folder}/merge_diff.tex') - - yield from update_ui_lastest_msg(f'尝试第 {n_fix}/{max_try} 次编译, 正在编译对比PDF ...', chatbot, history) # 刷新Gradio前端界面 - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'bibtex merge_diff.aux', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - ok = compile_latex_with_timeout(f'pdflatex -interaction=batchmode -file-line-error merge_diff.tex', work_folder) - - - # <---------- 检查结果 -----------> - results_ = "" - original_pdf_success = os.path.exists(pj(work_folder_original, f'{main_file_original}.pdf')) - modified_pdf_success = os.path.exists(pj(work_folder_modified, f'{main_file_modified}.pdf')) - diff_pdf_success = os.path.exists(pj(work_folder, f'merge_diff.pdf')) - results_ += f"原始PDF编译是否成功: {original_pdf_success};" - results_ += f"转化PDF编译是否成功: {modified_pdf_success};" - results_ += f"对比PDF编译是否成功: {diff_pdf_success};" - yield from update_ui_lastest_msg(f'第{n_fix}编译结束:
      {results_}...', chatbot, history) # 刷新Gradio前端界面 - - if diff_pdf_success: - result_pdf = pj(work_folder_modified, f'merge_diff.pdf') # get pdf path - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - if modified_pdf_success: - yield from update_ui_lastest_msg(f'转化PDF编译已经成功, 即将退出 ...', chatbot, history) # 刷新Gradio前端界面 - result_pdf = pj(work_folder_modified, f'{main_file_modified}.pdf') # get pdf path - if os.path.exists(pj(work_folder, '..', 'translation')): - shutil.copyfile(result_pdf, pj(work_folder, '..', 'translation', 'translate_zh.pdf')) - promote_file_to_downloadzone(result_pdf, rename_file=None, chatbot=chatbot) # promote file to web UI - return True # 成功啦 - else: - if n_fix>=max_try: break - n_fix += 1 - can_retry, main_file_modified, buggy_lines = remove_buggy_lines( - file_path=pj(work_folder_modified, f'{main_file_modified}.tex'), - log_path=pj(work_folder_modified, f'{main_file_modified}.log'), - tex_name=f'{main_file_modified}.tex', - tex_name_pure=f'{main_file_modified}', - n_fix=n_fix, - work_folder_modified=work_folder_modified, - ) - yield from update_ui_lastest_msg(f'由于最为关键的转化PDF编译失败, 将根据报错信息修正tex源文件并重试, 当前报错的latex代码处于第{buggy_lines}行 ...', chatbot, history) # 刷新Gradio前端界面 - if not can_retry: break - - return False # 失败啦 - - - diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/statuses/index.js b/spaces/fffiloni/controlnet-animation-doodle/node_modules/statuses/index.js deleted file mode 100644 index ea351c553520e1bc62d53b97a316194c48ac49e6..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/statuses/index.js +++ /dev/null @@ -1,146 +0,0 @@ -/*! - * statuses - * Copyright(c) 2014 Jonathan Ong - * Copyright(c) 2016 Douglas Christopher Wilson - * MIT Licensed - */ - -'use strict' - -/** - * Module dependencies. - * @private - */ - -var codes = require('./codes.json') - -/** - * Module exports. - * @public - */ - -module.exports = status - -// status code to message map -status.message = codes - -// status message (lower-case) to code map -status.code = createMessageToStatusCodeMap(codes) - -// array of status codes -status.codes = createStatusCodeList(codes) - -// status codes for redirects -status.redirect = { - 300: true, - 301: true, - 302: true, - 303: true, - 305: true, - 307: true, - 308: true -} - -// status codes for empty bodies -status.empty = { - 204: true, - 205: true, - 304: true -} - -// status codes for when you should retry the request -status.retry = { - 502: true, - 503: true, - 504: true -} - -/** - * Create a map of message to status code. - * @private - */ - -function createMessageToStatusCodeMap (codes) { - var map = {} - - Object.keys(codes).forEach(function forEachCode (code) { - var message = codes[code] - var status = Number(code) - - // populate map - map[message.toLowerCase()] = status - }) - - return map -} - -/** - * Create a list of all status codes. - * @private - */ - -function createStatusCodeList (codes) { - return Object.keys(codes).map(function mapCode (code) { - return Number(code) - }) -} - -/** - * Get the status code for given message. - * @private - */ - -function getStatusCode (message) { - var msg = message.toLowerCase() - - if (!Object.prototype.hasOwnProperty.call(status.code, msg)) { - throw new Error('invalid status message: "' + message + '"') - } - - return status.code[msg] -} - -/** - * Get the status message for given code. 
- * @private - */ - -function getStatusMessage (code) { - if (!Object.prototype.hasOwnProperty.call(status.message, code)) { - throw new Error('invalid status code: ' + code) - } - - return status.message[code] -} - -/** - * Get the status code. - * - * Given a number, this will throw if it is not a known status - * code, otherwise the code will be returned. Given a string, - * the string will be parsed for a number and return the code - * if valid, otherwise will lookup the code assuming this is - * the status message. - * - * @param {string|number} code - * @returns {number} - * @public - */ - -function status (code) { - if (typeof code === 'number') { - return getStatusMessage(code) - } - - if (typeof code !== 'string') { - throw new TypeError('code must be a number or string') - } - - // '403' - var n = parseInt(code, 10) - if (!isNaN(n)) { - return getStatusMessage(n) - } - - return getStatusCode(code) -} diff --git a/spaces/firsk/ai_otto/commons.py b/spaces/firsk/ai_otto/commons.py deleted file mode 100644 index d3fa07f65b1681e1f469b04b2fe689b7c174eaaa..0000000000000000000000000000000000000000 --- a/spaces/firsk/ai_otto/commons.py +++ /dev/null @@ -1,160 +0,0 @@ -import math -import torch -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size * dilation - dilation) / 2) - - -def convert_pad_shape(pad_shape): - layer = pad_shape[::-1] - pad_shape = [item for sublist in layer for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += ( - 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) - ) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / ( - num_timescales - 1 - ) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment - ) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, 
min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - layer = pad_shape[::-1] - pad_shape = [item for sublist in layer for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2, 3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1.0 / norm_type) - return total_norm diff --git a/spaces/fkhuggingme/gpt-academic/crazy_functions/test_project/cpp/cppipc/waiter.h b/spaces/fkhuggingme/gpt-academic/crazy_functions/test_project/cpp/cppipc/waiter.h deleted file mode 100644 index ee45fe3517be95ac1688a3e3540189edeb0d860c..0000000000000000000000000000000000000000 --- a/spaces/fkhuggingme/gpt-academic/crazy_functions/test_project/cpp/cppipc/waiter.h +++ /dev/null @@ -1,83 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -#include "libipc/def.h" -#include "libipc/mutex.h" -#include "libipc/condition.h" -#include "libipc/platform/detail.h" - -namespace ipc { -namespace detail { - -class waiter { - ipc::sync::condition cond_; - ipc::sync::mutex lock_; - std::atomic quit_ {false}; - -public: - static void init(); - - waiter() = default; - waiter(char const *name) { - open(name); - } - - ~waiter() { - close(); - } - - bool valid() const noexcept { - return cond_.valid() && lock_.valid(); - } - - bool open(char const *name) noexcept { - quit_.store(false, std::memory_order_relaxed); - if (!cond_.open((std::string{"_waiter_cond_"} + name).c_str())) { - return false; - } - if 
(!lock_.open((std::string{"_waiter_lock_"} + name).c_str())) { - cond_.close(); - return false; - } - return valid(); - } - - void close() noexcept { - cond_.close(); - lock_.close(); - } - - template - bool wait_if(F &&pred, std::uint64_t tm = ipc::invalid_value) noexcept { - IPC_UNUSED_ std::lock_guard guard {lock_}; - while ([this, &pred] { - return !quit_.load(std::memory_order_relaxed) - && std::forward(pred)(); - }()) { - if (!cond_.wait(lock_, tm)) return false; - } - return true; - } - - bool notify() noexcept { - std::lock_guard{lock_}; // barrier - return cond_.notify(lock_); - } - - bool broadcast() noexcept { - std::lock_guard{lock_}; // barrier - return cond_.broadcast(lock_); - } - - bool quit_waiting() { - quit_.store(true, std::memory_order_release); - return broadcast(); - } -}; - -} // namespace detail -} // namespace ipc diff --git a/spaces/flax-community/chef-transformer/asset/css/style.css b/spaces/flax-community/chef-transformer/asset/css/style.css deleted file mode 100644 index fa3e0df443653a7a3fe162fdd66f2c1278e62aaf..0000000000000000000000000000000000000000 --- a/spaces/flax-community/chef-transformer/asset/css/style.css +++ /dev/null @@ -1,132 +0,0 @@ -body { - background-color: #fff; -} - -.font-title { - font-family: 'Poppins', sans-serif !important; -} -.font-body { - font-family: 'Montserrat', sans-serif !important; -} -.text-bold { - font-weight: normal !important; -} -.text-bold { - font-weight: bold !important; -} - -.fullScreenFrame > div { - display: flex; - justify-content: center; -} -.comma:not(:empty) ~ .comma:not(:empty):before { - content: ", "; -} -.strong { - font-weight: bold; -} -.d-block { - display: block; -} -.extra-info { - font-weight: normal; - font-style: italic; - font-size: small; -} - -.contributors { - margin-bottom: 10px; - border-bottom: 1px solid #f3f3f3; - padding-bottom: 10px; -} -.contributors a.contributor { - text-decoration: none; - color: #585858; -} -.contributors a.contributor:hover { - text-decoration: underline; -} - -.story-box { - overflow-y: scroll; - max-height: 240px; -} - -.story-box p { - font-size: 0.85rem; -} -.story-box pre { - font-size: 0.6rem; -} - -.r-text-recipe { - /* padding-left: 30px; - margin-left: 10px;*/ - border-right: 1px dashed #eee; -} - -.divider { - margin: 5px 0; - width: 400px; - max-width: 100%; - position:relative; -} - -.divider-mask { - overflow: hidden; - height: 20px; -} - -.divider-mask:after { - content: ''; - display: block; - width: 170px; - height: 0px; - border-bottom: 2px solid #e9a726; - border-radius: 10px; - left: 0px; -} - -.r-text-recipe .food-title { - text-align: left; -} -.r-text-recipe .food-title img { - max-width: 300px; - float: left; - margin-right: 30px; - margin-bottom: 30px; -} -.r-text-recipe .food-title h2 { -} -.ingredients {} -.ingredients-list { - columns: 2; - -webkit-columns: 2; - -moz-columns: 2; -} -.directions { - clear: both; - float: none; - padding-top: 20px; - display: block; -} -.directions-list {} - - -@media only screen and (max-width: 600px) { - .r-text-recipe { - border-right: 0; - border-bottom: 1px dashed #eee; - } - .r-text-recipe .food-title img { - max-width: 200px; - } - .directions { - padding-top: 0px; - } - .ingredients-list { - columns: 1; - -webkit-columns: 1; - -moz-columns: 1; - } -} \ No newline at end of file diff --git a/spaces/florim/MedGPT/run.sh b/spaces/florim/MedGPT/run.sh deleted file mode 100644 index edcbc44155b9ca9df83e283fdf976472c13e6492..0000000000000000000000000000000000000000 --- a/spaces/florim/MedGPT/run.sh +++ 
/dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -python scripts/check_requirements.py requirements.txt -if [ $? -eq 1 ] -then - echo Installing missing packages... - pip install -r requirements.txt -fi -python -m autogpt $@ -read -p "Press any key to continue..." diff --git a/spaces/freddyaboulton/openai-whisper-large/app.py b/spaces/freddyaboulton/openai-whisper-large/app.py deleted file mode 100644 index 0d7ff1647cd2be49d72e567ea588323d68b37ae5..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/openai-whisper-large/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/openai/whisper-large").launch() \ No newline at end of file diff --git a/spaces/gersh/OpenAssistant-falcon-40b-sft-top1-560/README.md b/spaces/gersh/OpenAssistant-falcon-40b-sft-top1-560/README.md deleted file mode 100644 index 5abd4b9331a76d3f5ad2a0e61b6400b7c2b62ed5..0000000000000000000000000000000000000000 --- a/spaces/gersh/OpenAssistant-falcon-40b-sft-top1-560/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: OpenAssistant Falcon 40b Sft Top1 560 -emoji: 📈 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Drivers HLDTST DVDRAM GT30N SCSI CdRom Device For Windows 10 64bit How to download and install - Aleyna Aksu[3].md b/spaces/gotiQspiryo/whisper-ui/examples/Drivers HLDTST DVDRAM GT30N SCSI CdRom Device For Windows 10 64bit How to download and install - Aleyna Aksu[3].md deleted file mode 100644 index 78fc267ae36e92f55b2b03ff3442c65a5d57c701..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Drivers HLDTST DVDRAM GT30N SCSI CdRom Device For Windows 10 64bit How to download and install - Aleyna Aksu[3].md +++ /dev/null @@ -1,6 +0,0 @@ -

-Drivers HLDTST DVDRAM GT30N SCSI CdRom Device For Windows 10 64bit
-
-DOWNLOAD >>>>> https://urlgoal.com/2uyMEI
-
- aaccfb2cb3
-

      diff --git a/spaces/gryan-galario/manga-ocr-demo/README.md b/spaces/gryan-galario/manga-ocr-demo/README.md deleted file mode 100644 index 35674fe1c6fa8409d298fdf690520a2be92bccd3..0000000000000000000000000000000000000000 --- a/spaces/gryan-galario/manga-ocr-demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Manga Ocr Demo -emoji: 💻 -colorFrom: indigo -colorTo: purple -sdk: gradio -sdk_version: 2.8.14 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/gvw/js-space/app.py b/spaces/gvw/js-space/app.py deleted file mode 100644 index c21c8e562ddfe357073eea847d54c08610224946..0000000000000000000000000000000000000000 --- a/spaces/gvw/js-space/app.py +++ /dev/null @@ -1,320 +0,0 @@ -import os -from langchain.document_loaders import PyPDFLoader -from langchain.document_loaders import YoutubeLoader -from langchain.document_loaders import TextLoader -from langchain.indexes import VectorstoreIndexCreator -from langchain.chat_models import ChatOpenAI -from langchain.prompts.chat import SystemMessage, HumanMessagePromptTemplate -import gradio as gr -#from langchain.schema import SystemMessage -import openai -#from langchain.document_loaders import Docx2txtLoader -import difflib -from langchain.prompts import ChatPromptTemplate -#from langchain.llms import OpenAI -from langchain import PromptTemplate -import re -from langchain.embeddings import OpenAIEmbeddings -from langchain.text_splitter import CharacterTextSplitter -from langchain.vectorstores import Chroma -#from langchain.document_loaders import PyMuPDFLoader -from langchain.document_loaders import UnstructuredPDFLoader -from langchain.text_splitter import RecursiveCharacterTextSplitter -#import chromadb - - -OPENAI_API_KEY = os.environ['OPENAI_API_KEY'] -openai.organization = "org-ZPFAcooloqEkDeC13DXrp82L" - -def to_html_colored(txt): - # Convert strikethrough with red color - while "~~" in txt: - txt = txt.replace('~~', '', 1).replace('~~', '', 1) - # Convert underline with green color - while "__" in txt: - txt = txt.replace('__', '', 1).replace('__', '', 1) - - # Convert newline to
      for HTML display - txt = txt.replace('\n', '
      ') - - return txt - -def show_difference(txt1, txt2): - # Remove newline from text - txt1 = txt1.replace('\n', ' ') - - # Split by both space and newline to preserve newlines - words1 = [word for line in txt1.split('\n') for word in (line.split() + ['\n'])] - words2 = [word for line in txt2.split('\n') for word in (line.split() + ['\n'])] - - diff = list(difflib.ndiff(words1, words2)) - - result = [] - buffer = [] # To temporarily hold words before wrapping them in tags - prev_tag = ' ' # To track the previous tag (' ', '-', '+') - - for i, d in enumerate(diff): - if d[0] == ' ': - if prev_tag == '-': - result.append(f"~~{' '.join(buffer)}~~ ") - buffer = [] - elif prev_tag == '+': - result.append(f"__{' '.join(buffer)}__ ") - buffer = [] - - # If current word is a newline, add newline to result, else add the word with a space - result.append(d[2:] if d[2:] != '\n' else '
      ') - if d[2:] != '\n' and i != len(diff) - 1: - result.append(" ") - - prev_tag = ' ' - elif d[0] == '-': - if prev_tag == '+' and buffer: - result.append(f"__{' '.join(buffer)}__ ") - buffer = [] - buffer.append(d[2:]) - prev_tag = '-' - elif d[0] == '+': - if prev_tag == '-' and buffer: - result.append(f"~~{' '.join(buffer)}~~ ") - buffer = [] - buffer.append(d[2:]) - prev_tag = '+' - - # Append any remaining words in the buffer to result - if buffer: - if prev_tag == '-': - result.append(f"~~{' '.join(buffer)}~~") - elif prev_tag == '+': - result.append(f"__{' '.join(buffer)}__") - - return ''.join(result).strip() # Remove any leading or trailing spaces - -def rearrange_text(s): - # Regular expression to find text enclosed by "~~" and followed by two newlines - patterns = [r"~~(?:(?!~~).)*~~ __\n \n", r"\n \n__ ~~(?:(?! ~~).)*~~"] - - for pattern in patterns: - # Extract matches - matches = re.findall(pattern, s) - - # Remove matched text except the two newline characters - #s = re.sub(pattern, '__\n \n', s) - - # Insert the found text immediately after the two newline characters - for match in matches: - if pattern == patterns[0]: - s = s.replace(match, '__\n \n' + match.replace('__\n \n', ''), 1) - else: - s = s.replace(match, match.replace('\n \n__', '') + '\n \n__' , 1) - return s - - - -def query_gpt_edit(article): - template = ChatPromptTemplate.from_messages( - [ - SystemMessage( - content=( - "You are assuming the role of editor's assistant. You will be provided with an article that you must edit. \n DO NOT REWRITE THE ARTICLE, DO NOT CHANGE ANY PHRASES INSIDE QUOTE MARKS, DO NOT CHANGE FACTS OR NAMES, DO NOT REMOVE FULL SENTENCES OR PARAGRAPHS. 1. Edit the article in the following ways: 1a. Make sure the article uses AP style and structure. 1b. Correct grammatical errors. \n 3. Next, give options for headlines: Sensationalized headline, Objective headline in the style of New York Times, Adapt for a conservative audience, Adapt for a liberal audience, Adapt for politically center audience." - ) - ), - HumanMessagePromptTemplate.from_template("{text}"), - ] - ) - - llm = ChatOpenAI(model_name="gpt-4", temperature=0.1) - output = llm(template.format_messages(text=article)) - print(output.content) - return output.content - - - - -def output_formatted_edits(txt1, txt2): - html_output = to_html_colored(show_difference(txt1, txt2)) - return html_output - -def edit_article(og_article): - edited_article = query_gpt_edit(og_article) - return edited_article, output_formatted_edits(og_article, edited_article) - -def query_gpt4b(message, url, history=None): - if history is None: - history = [] - history_openai_format = [] - for human, assistant, in history: - history_openai_format.append({"role": "user", "content": human}) - history_openai_format.append({"role": "assistant", "content": assistant}) - history_openai_format.append({"role": "system", "content": "Instruction: You are a writer and editor, tasked with condensing news articles. You are to rewrite a condensed version of the article. Use different words so as to not plagiarize. Your work should have its own unique character and be in proper AP style. You will also be provided with a URL. At the end of your rewritten article, make sure to add the url you were provided along with following phrase: “Read more at “ make sure to insert the url as an html anchor tag link, with the text showing as the actual name of the news organization. 
For example if you were given url www.abcnews.com/articles/this_title, you would put Read more at ABC News. Lastly, your response should include three headline choices ranging from conservative to liberal to moderate, and suggested section headers if applicable."}) - history_openai_format.append({"role": "user", "content": f"{url} {message}"}) - - - response = openai.ChatCompletion.create( - model='gpt-4', - messages= history_openai_format, - temperature=0.6, - presence_penalty=0.5, - stream=True - ) - - partial_message = "" - for chunk in response: - if len(chunk['choices'][0]['delta']) != 0: - partial_message = partial_message + chunk['choices'][0]['delta']['content'] - yield partial_message - return partial_message - -def query_gpt4c(message, history=None): - if history is None: - history = [] - history_openai_format = [] - for human, assistant, in history: - history_openai_format.append({"role": "user", "content": human}) - history_openai_format.append({"role": "assistant", "content": assistant}) - history_openai_format.append({"role": "system", "content": "You will be provided with html of an article in which you will edit in the following ways. ALWAYS REPLY WITH THE ENTIRE ARTICLE THAT YOU EDITED. \n1. You are to change all links to open in a new tab \n2. Remove (AP) but leave the city and dash. If the very first word is a city name, leave it in all caps. \n3. Remove all html classes and any unneeded divs or spans. \n4. Change the headlines and section headers to AP Title Case. \n5. Add section headers if not provided and place them where appropriate using the

      tag and using AP Title Case which is upper lower case. \n6. If the article is over 600 words, find a good place towards the end of the article, to cut off. \n7. No need to change any words of the article or title other than that. Do not remove or change pullquotes from the article. \n8. Meta Description: In addition, at the end, provide a rewrite of the entire article but down to 64-99 characters, keep full names with designations, don't use dates but can keep day of the week, do in one or two sentences and make sure to use AP style and full sentence structure. "}) - history_openai_format.append({"role": "user", "content": message}) - - - response = openai.ChatCompletion.create( - model='gpt-4', - messages= history_openai_format, - temperature=0.3, - stream=True - ) - - partial_message = "" - for chunk in response: - if len(chunk['choices'][0]['delta']) != 0: - partial_message = partial_message + chunk['choices'][0]['delta']['content'] - yield partial_message - return partial_message - -def query_yt(message, history, videos_id): - loader = YoutubeLoader.from_youtube_url(f'{videos_id}', add_video_info=True) - loader.load() - index = VectorstoreIndexCreator( - vectorstore_cls=Chroma, - embedding=OpenAIEmbeddings(chunk_size=2000), - text_splitter=RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=150), - vectorstore_kwargs={'collection_name':'youtubeJS'} - ).from_loaders([loader]) - indexes = index.query(message) - history_openai_format = [] - for human, assistant in history: - history_openai_format.append({"role": "user", "content": human }) - history_openai_format.append({"role": "assistant", "content":assistant}) - history_openai_format.append({"role": "system", "content": "Instruction: You have been given an exerpt of a transcript of a youtube video. Your job is to answer questions regarding the youtube video, and give helpful insights about the videos content. You also take instructions and will follow them. Mixed with your training data, you give insightful, valuable responses. You are talking to Jeffrey. If you don't have an answer, or the question is otherwise unclear, you will politely ask that the user rephrase the question."}) - history_openai_format.append({"role": "user", "content": f"History of conversation between user and the AI assistant: {history} The question or instruction from the user: {message} Relevent exerpts of the youtube video's transcript: {indexes}" }) - - response = openai.ChatCompletion.create( - model='gpt-4', - messages= history_openai_format, - temperature=0, - max_tokens=1200, - stream=True - ) - - partial_message = "" - for chunk in response: - if len(chunk['choices'][0]['delta']) != 0: - partial_message = partial_message + chunk['choices'][0]['delta']['content'] - yield partial_message - -def pdf_query2(message, history, pdf_file): - loader = UnstructuredPDFLoader(pdf_file.name) - docs = loader.load() - index = VectorstoreIndexCreator( - vectorstore_cls=Chroma, - embedding=OpenAIEmbeddings(chunk_size=600), - text_splitter=RecursiveCharacterTextSplitter(chunk_size=600, chunk_overlap=240), - vectorstore_kwargs={'collection_name':'pdfJS'} - ).from_loaders([loader]) - indexes = index.query(message) - history_openai_format = [] - for human, assistant in history: - history_openai_format.append({"role": "user", "content": human }) - history_openai_format.append({"role": "assistant", "content":assistant}) - history_openai_format.append({"role": "system", "content": "Instruction: You have been given an exerpt of the contents of a pdf document. 
Your job is to answer questions regarding the pdf document's contents, and give helpful insights about it. You also take instructions and will follow them. Mixed with your training data, you give insightful, valuable responses. You are talking to Jeffrey. If you don't have an answer, or the question is otherwise unclear, you will politely ask that the user rephrase the question."}) - history_openai_format.append({"role": "user", "content": f"History of conversation between user and the AI assistant: {history} The question or instruction from the user: {message} Relevent exerpts of the pdf document: {indexes}" }) - - response = openai.ChatCompletion.create( - model='gpt-4', - messages= history_openai_format, - temperature=0.3, - max_tokens=800, - stream=True - ) - - partial_message = "" - for chunk in response: - if len(chunk['choices'][0]['delta']) != 0: - partial_message = partial_message + chunk['choices'][0]['delta']['content'] - yield partial_message - - - -# index = VectorstoreIndexCreator( -# vectorstore_cls=Chroma, -# embedding=OpenAIEmbeddings(), -# text_splitter=CharacterTextSplitter(chunk_size=9000, chunk_overlap=100) - - - - -with gr.Blocks(theme=gr.themes.Soft(primary_hue="teal").set(prose_text_weight='100'), mode="Advanced Data Query and Create", title="GV-AI JS", css=".gradio-container {padding: 0px 0px 80px 0px !important; max-width: 800px !important;}") as demo: - gr.Markdown("

      Article Editing, Rewriting, and Research.

      ") - with gr.Tab("Edit an Article"): - gr.Markdown("Paste in the article, and the AI will respond with an edited version, as well as new headline options.") - text_inputj = gr.components.Textbox(label="Paste article here") - text_outputj = gr.components.Textbox(label="AI response: ", show_copy_button=True) - text_buttonj = gr.components.Button("Submit Article") - html_outputj = gr.HTML() - - with gr.Tab("AP articles"): - gr.Markdown("Instructions: Go to the AP article in newsroom. Copy the headline as is, and paste it into the copy area of a new post in Wordpress. Right under that, paste in the rest of the article as is from newsroom. Go to the text tab. Copy everything in there. Paste that below and and press the button.") - text_inputc = gr.components.Textbox(label="Paste html of article with the headline here") - text_outputc = gr.components.Textbox(label="AI response: ", show_copy_button=True) - text_buttonc = gr.components.Button("Submit Article") - - with gr.Tab("Aggregate an Article"): - gr.Markdown("Aggregate any article using this tab. ") - text_input_url = gr.components.Textbox(label="Enter the URL to the Article Here: ") - text_inpute = gr.components.Textbox(label="Paste article here: ") - text_outpute = gr.components.Textbox(label="AI Response", show_copy_button=True) - text_buttone = gr.components.Button("Submit") - - with gr.Tab("YouTube Research Tool"): - gr.Markdown("Provide a YouTube URL and the A.I. chatbot will study the transcript. You can then ask questions about the video, or have the A.I. perform tasks based on the video, i.e. 'Write an article about the video.' May take up to 30 seconds or more to respond.") - gr.ChatInterface( - query_yt, - retry_btn=None, - undo_btn="Delete Previous", - clear_btn="Clear", - stop_btn="Stop", - additional_inputs=gr.Textbox(placeholder='Paste URL here', label='YouTube URL'), - autofocus=True) - - - with gr.Tab("PDF Research Tool"): - gr.Markdown("Provide a pdf file and the A.I. will read all of it and answer your questions about it or perform actions based on the contents of the document, i.e. 'Write an article about the pdf file' or 'list all of the topics in the document' etc. May take up to 30 seconds or more to respond.") - gr.ChatInterface( - pdf_query2, - css=".gradio-container {padding: 0px 0px 80px 0px !important; max-height: 600px !important;}", - retry_btn=None, - undo_btn="Delete Previous", - clear_btn="Clear", - stop_btn="Stop", - additional_inputs=gr.components.File(type='file'), - autofocus=True) - - - text_buttonj.click(edit_article, inputs=text_inputj, outputs=[text_outputj, html_outputj]) - text_buttonc.click(query_gpt4c, inputs=text_inputc, outputs=text_outputc) - text_buttone.click(query_gpt4b, inputs=[text_inpute, text_input_url], outputs=text_outpute) - - -USER_LOGIN = os.environ['USER_LOGIN'] -PASS_LOGIN = os.environ['PASS_LOGIN'] - -demo.queue().launch(auth=(f"{USER_LOGIN}", f"{PASS_LOGIN}"), show_error=False, ssl_verify=True) \ No newline at end of file diff --git a/spaces/gylleus/icongen/torch_utils/ops/bias_act.py b/spaces/gylleus/icongen/torch_utils/ops/bias_act.py deleted file mode 100644 index b092c7ffe1b11591bd047dd8ad22725084b0568d..0000000000000000000000000000000000000000 --- a/spaces/gylleus/icongen/torch_utils/ops/bias_act.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
-# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient bias and activation.""" - -import os -import sys -import warnings -import numpy as np -import torch -import dnnlib - -from .. import custom_ops -from .. import misc - -#---------------------------------------------------------------------------- - -activation_funcs = { - 'linear': dnnlib.EasyDict(func=lambda x, **_: x, def_alpha=0, def_gain=1, cuda_idx=1, ref='', has_2nd_grad=False), - 'relu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.relu(x), def_alpha=0, def_gain=np.sqrt(2), cuda_idx=2, ref='y', has_2nd_grad=False), - 'lrelu': dnnlib.EasyDict(func=lambda x, alpha, **_: torch.nn.functional.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', has_2nd_grad=False), - 'tanh': dnnlib.EasyDict(func=lambda x, **_: torch.tanh(x), def_alpha=0, def_gain=1, cuda_idx=4, ref='y', has_2nd_grad=True), - 'sigmoid': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x), def_alpha=0, def_gain=1, cuda_idx=5, ref='y', has_2nd_grad=True), - 'elu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.elu(x), def_alpha=0, def_gain=1, cuda_idx=6, ref='y', has_2nd_grad=True), - 'selu': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.selu(x), def_alpha=0, def_gain=1, cuda_idx=7, ref='y', has_2nd_grad=True), - 'softplus': dnnlib.EasyDict(func=lambda x, **_: torch.nn.functional.softplus(x), def_alpha=0, def_gain=1, cuda_idx=8, ref='y', has_2nd_grad=True), - 'swish': dnnlib.EasyDict(func=lambda x, **_: torch.sigmoid(x) * x, def_alpha=0, def_gain=np.sqrt(2), cuda_idx=9, ref='x', has_2nd_grad=True), -} - -#---------------------------------------------------------------------------- - -_inited = False -_plugin = None -_null_tensor = torch.empty([0]) - -def _init(): - global _inited, _plugin - if not _inited: - _inited = True - sources = ['bias_act.cpp', 'bias_act.cu'] - sources = [os.path.join(os.path.dirname(__file__), s) for s in sources] - try: - _plugin = custom_ops.get_plugin('bias_act_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math']) - except: - warnings.warn('Failed to build CUDA kernels for bias_act. Falling back to slow reference implementation. Details:\n\n' + str(sys.exc_info()[1])) - return _plugin is not None - -#---------------------------------------------------------------------------- - -def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'): - r"""Fused bias and activation function. - - Adds bias `b` to activation tensor `x`, evaluates activation function `act`, - and scales the result by `gain`. Each of the steps is optional. In most cases, - the fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports first and second order gradients, - but not third order gradients. - - Args: - x: Input activation tensor. Can be of any shape. - b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type - as `x`. The shape must be known, and it must match the dimension of `x` - corresponding to `dim`. - dim: The dimension in `x` corresponding to the elements of `b`. - The value of `dim` is ignored if `b` is not specified. 
- act: Name of the activation function to evaluate, or `"linear"` to disable. - Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc. - See `activation_funcs` for a full list. `None` is not allowed. - alpha: Shape parameter for the activation function, or `None` to use the default. - gain: Scaling factor for the output tensor, or `None` to use default. - See `activation_funcs` for the default scaling of each activation function. - If unsure, consider specifying 1. - clamp: Clamp the output values to `[-clamp, +clamp]`, or `None` to disable - the clamping (default). - impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default). - - Returns: - Tensor of the same shape and datatype as `x`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp).apply(x, b) - return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Slow reference implementation of `bias_act()` using standard TensorFlow ops. - """ - assert isinstance(x, torch.Tensor) - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Add bias. - if b is not None: - assert isinstance(b, torch.Tensor) and b.ndim == 1 - assert 0 <= dim < x.ndim - assert b.shape[0] == x.shape[dim] - x = x + b.reshape([-1 if i == dim else 1 for i in range(x.ndim)]) - - # Evaluate activation function. - alpha = float(alpha) - x = spec.func(x, alpha=alpha) - - # Scale by gain. - gain = float(gain) - if gain != 1: - x = x * gain - - # Clamp. - if clamp >= 0: - x = x.clamp(-clamp, clamp) # pylint: disable=invalid-unary-operand-type - return x - -#---------------------------------------------------------------------------- - -_bias_act_cuda_cache = dict() - -def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None): - """Fast CUDA implementation of `bias_act()` using custom ops. - """ - # Parse arguments. - assert clamp is None or clamp >= 0 - spec = activation_funcs[act] - alpha = float(alpha if alpha is not None else spec.def_alpha) - gain = float(gain if gain is not None else spec.def_gain) - clamp = float(clamp if clamp is not None else -1) - - # Lookup from cache. - key = (dim, act, alpha, gain, clamp) - if key in _bias_act_cuda_cache: - return _bias_act_cuda_cache[key] - - # Forward op. 
- class BiasActCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, b): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride()[1] == 1 else torch.contiguous_format - x = x.contiguous(memory_format=ctx.memory_format) - b = b.contiguous() if b is not None else _null_tensor - y = x - if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor: - y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor, - y if 'y' in spec.ref else _null_tensor) - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - dy = dy.contiguous(memory_format=ctx.memory_format) - x, b, y = ctx.saved_tensors - dx = None - db = None - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - dx = dy - if act != 'linear' or gain != 1 or clamp >= 0: - dx = BiasActCudaGrad.apply(dy, x, b, y) - - if ctx.needs_input_grad[1]: - db = dx.sum([i for i in range(dx.ndim) if i != dim]) - - return dx, db - - # Backward op. - class BiasActCudaGrad(torch.autograd.Function): - @staticmethod - def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ - ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride()[1] == 1 else torch.contiguous_format - dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp) - ctx.save_for_backward( - dy if spec.has_2nd_grad else _null_tensor, - x, b, y) - return dx - - @staticmethod - def backward(ctx, d_dx): # pylint: disable=arguments-differ - d_dx = d_dx.contiguous(memory_format=ctx.memory_format) - dy, x, b, y = ctx.saved_tensors - d_dy = None - d_x = None - d_b = None - d_y = None - - if ctx.needs_input_grad[0]: - d_dy = BiasActCudaGrad.apply(d_dx, x, b, y) - - if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]): - d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp) - - if spec.has_2nd_grad and ctx.needs_input_grad[2]: - d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim]) - - return d_dy, d_x, d_b, d_y - - # Add to cache. 
- _bias_act_cuda_cache[key] = BiasActCuda - return BiasActCuda - -#---------------------------------------------------------------------------- diff --git a/spaces/gyugnsu/DragGan-Inversion/gradio_utils/utils.py b/spaces/gyugnsu/DragGan-Inversion/gradio_utils/utils.py deleted file mode 100644 index d4e760e1515f3f69b11d11426ac3e8fa51f1a99c..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/gradio_utils/utils.py +++ /dev/null @@ -1,154 +0,0 @@ -import gradio as gr -import numpy as np -from PIL import Image, ImageDraw - - -class ImageMask(gr.components.Image): - """ - Sets: source="canvas", tool="sketch" - """ - - is_template = True - - def __init__(self, **kwargs): - super().__init__(source="upload", - tool="sketch", - interactive=False, - **kwargs) - - def preprocess(self, x): - if x is None: - return x - if self.tool == "sketch" and self.source in ["upload", "webcam" - ] and type(x) != dict: - decode_image = gr.processing_utils.decode_base64_to_image(x) - width, height = decode_image.size - mask = np.ones((height, width, 4), dtype=np.uint8) - mask[..., -1] = 255 - mask = self.postprocess(mask) - x = {'image': x, 'mask': mask} - return super().preprocess(x) - - -def get_valid_mask(mask: np.ndarray): - """Convert mask from gr.Image(0 to 255, RGBA) to binary mask. - """ - if mask.ndim == 3: - mask_pil = Image.fromarray(mask).convert('L') - mask = np.array(mask_pil) - if mask.max() == 255: - mask = mask / 255 - return mask - - -def draw_points_on_image(image, - points, - curr_point=None, - highlight_all=True, - radius_scale=0.01): - overlay_rgba = Image.new("RGBA", image.size, 0) - overlay_draw = ImageDraw.Draw(overlay_rgba) - for point_key, point in points.items(): - if ((curr_point is not None and curr_point == point_key) - or highlight_all): - p_color = (255, 0, 0) - t_color = (0, 0, 255) - - else: - p_color = (255, 0, 0, 35) - t_color = (0, 0, 255, 35) - - rad_draw = int(image.size[0] * radius_scale) - - p_start = point.get("start_temp", point["start"]) - p_target = point["target"] - - if p_start is not None and p_target is not None: - p_draw = int(p_start[0]), int(p_start[1]) - t_draw = int(p_target[0]), int(p_target[1]) - - overlay_draw.line( - (p_draw[0], p_draw[1], t_draw[0], t_draw[1]), - fill=(255, 255, 0), - width=2, - ) - - if p_start is not None: - p_draw = int(p_start[0]), int(p_start[1]) - overlay_draw.ellipse( - ( - p_draw[0] - rad_draw, - p_draw[1] - rad_draw, - p_draw[0] + rad_draw, - p_draw[1] + rad_draw, - ), - fill=p_color, - ) - - if curr_point is not None and curr_point == point_key: - # overlay_draw.text(p_draw, "p", font=font, align="center", fill=(0, 0, 0)) - overlay_draw.text(p_draw, "p", align="center", fill=(0, 0, 0)) - - if p_target is not None: - t_draw = int(p_target[0]), int(p_target[1]) - overlay_draw.ellipse( - ( - t_draw[0] - rad_draw, - t_draw[1] - rad_draw, - t_draw[0] + rad_draw, - t_draw[1] + rad_draw, - ), - fill=t_color, - ) - - if curr_point is not None and curr_point == point_key: - # overlay_draw.text(t_draw, "t", font=font, align="center", fill=(0, 0, 0)) - overlay_draw.text(t_draw, "t", align="center", fill=(0, 0, 0)) - - return Image.alpha_composite(image.convert("RGBA"), - overlay_rgba).convert("RGB") - - -def draw_mask_on_image(image, mask): - im_mask = np.uint8(mask * 255) - im_mask_rgba = np.concatenate( - ( - np.tile(im_mask[..., None], [1, 1, 3]), - 45 * np.ones( - (im_mask.shape[0], im_mask.shape[1], 1), dtype=np.uint8), - ), - axis=-1, - ) - im_mask_rgba = Image.fromarray(im_mask_rgba).convert("RGBA") - - 
return Image.alpha_composite(image.convert("RGBA"), - im_mask_rgba).convert("RGB") - - -def on_change_single_global_state(keys, - value, - global_state, - map_transform=None): - if map_transform is not None: - value = map_transform(value) - - curr_state = global_state - if isinstance(keys, str): - last_key = keys - - else: - for k in keys[:-1]: - curr_state = curr_state[k] - - last_key = keys[-1] - - curr_state[last_key] = value - return global_state - - -def get_latest_points_pair(points_dict): - if not points_dict: - return None - point_idx = list(points_dict.keys()) - latest_point_idx = max(point_idx) - return latest_point_idx diff --git a/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/upfirdn2d.py b/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/upfirdn2d.py deleted file mode 100644 index b544be1d52e97bfc02e59d08c30c6ddbb69bdbde..0000000000000000000000000000000000000000 --- a/spaces/hamzapehlivan/StyleRes/models/torch_utils/ops/upfirdn2d.py +++ /dev/null @@ -1,389 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Custom PyTorch ops for efficient resampling of 2D images.""" - -import os -import numpy as np -import torch - -from .. import custom_ops -from .. import misc -from . import conv2d_gradfix - -#---------------------------------------------------------------------------- - -_plugin = None - -def _init(): - global _plugin - if _plugin is None: - _plugin = custom_ops.get_plugin( - module_name='upfirdn2d_plugin', - sources=['upfirdn2d.cpp', 'upfirdn2d.cu'], - headers=['upfirdn2d.h'], - source_dir=os.path.dirname(__file__), - extra_cuda_cflags=['--use_fast_math'], - ) - return True - -def _parse_scaling(scaling): - if isinstance(scaling, int): - scaling = [scaling, scaling] - assert isinstance(scaling, (list, tuple)) - assert all(isinstance(x, int) for x in scaling) - sx, sy = scaling - assert sx >= 1 and sy >= 1 - return sx, sy - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, int) for x in padding) - if len(padding) == 2: - padx, pady = padding - padding = [padx, padx, pady, pady] - padx0, padx1, pady0, pady1 = padding - return padx0, padx1, pady0, pady1 - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - fw = f.shape[-1] - fh = f.shape[0] - with misc.suppress_tracer_warnings(): - fw = int(fw) - fh = int(fh) - misc.assert_shape(f, [fh, fw][:f.ndim]) - assert fw >= 1 and fh >= 1 - return fw, fh - -#---------------------------------------------------------------------------- - -def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None): - r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`. - - Args: - f: Torch tensor, numpy array, or python list of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), - `[]` (impulse), or - `None` (identity). - device: Result device (default: cpu). - normalize: Normalize the filter so that it retains the magnitude - for constant input signal (DC)? 
(default: True). - flip_filter: Flip the filter? (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - separable: Return a separable filter? (default: select automatically). - - Returns: - Float32 tensor of the shape - `[filter_height, filter_width]` (non-separable) or - `[filter_taps]` (separable). - """ - # Validate. - if f is None: - f = 1 - f = torch.as_tensor(f, dtype=torch.float32) - assert f.ndim in [0, 1, 2] - assert f.numel() > 0 - if f.ndim == 0: - f = f[np.newaxis] - - # Separable? - if separable is None: - separable = (f.ndim == 1 and f.numel() >= 8) - if f.ndim == 1 and not separable: - f = f.ger(f) - assert f.ndim == (1 if separable else 2) - - # Apply normalize, flip, gain, and device. - if normalize: - f /= f.sum() - if flip_filter: - f = f.flip(list(range(f.ndim))) - f = f * (gain ** (f.ndim / 2)) - f = f.to(device=device) - return f - -#---------------------------------------------------------------------------- - -def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Pad, upsample, filter, and downsample a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 2. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 4. Downsample the image by keeping every Nth pixel (`down`). - - This sequence of operations bears close resemblance to scipy.signal.upfirdn(). - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f) - return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1): - """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops. - """ - # Validate arguments. 
- assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - assert f.dtype == torch.float32 and not f.requires_grad - batch_size, num_channels, in_height, in_width = x.shape - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Check that upsampled buffer is not smaller than the filter. - upW = in_width * upx + padx0 + padx1 - upH = in_height * upy + pady0 + pady1 - assert upW >= f.shape[-1] and upH >= f.shape[0] - - # Upsample by inserting zeros. - x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1]) - x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1]) - x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx]) - - # Pad or crop. - x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)]) - x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)] - - # Setup filter. - f = f * (gain ** (f.ndim / 2)) - f = f.to(x.dtype) - if not flip_filter: - f = f.flip(list(range(f.ndim))) - - # Convolve with the filter. - f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim) - if f.ndim == 4: - x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels) - else: - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=num_channels) - x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=num_channels) - - # Downsample by throwing away pixels. - x = x[:, :, ::downy, ::downx] - return x - -#---------------------------------------------------------------------------- - -_upfirdn2d_cuda_cache = dict() - -def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1): - """Fast CUDA implementation of `upfirdn2d()` using custom ops. - """ - # Parse arguments. - upx, upy = _parse_scaling(up) - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - - # Lookup from cache. - key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - if key in _upfirdn2d_cuda_cache: - return _upfirdn2d_cuda_cache[key] - - # Forward op. - class Upfirdn2dCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, f): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - if f is None: - f = torch.ones([1, 1], dtype=torch.float32, device=x.device) - if f.ndim == 1 and f.shape[0] == 1: - f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1. 
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2] - y = x - if f.ndim == 2: - y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain) - else: - y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0) - y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain) - ctx.save_for_backward(f) - ctx.x_shape = x.shape - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - f, = ctx.saved_tensors - _, _, ih, iw = ctx.x_shape - _, _, oh, ow = dy.shape - fw, fh = _get_filter_size(f) - p = [ - fw - padx0 - 1, - iw * upx - ow * downx + padx0 - upx + 1, - fh - pady0 - 1, - ih * upy - oh * downy + pady0 - upy + 1, - ] - dx = None - df = None - - if ctx.needs_input_grad[0]: - dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f) - - assert not ctx.needs_input_grad[1] - return dx, df - - # Add to cache. - _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda - return Upfirdn2dCuda - -#---------------------------------------------------------------------------- - -def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Filter a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape matches the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + fw // 2, - padx1 + (fw - 1) // 2, - pady0 + fh // 2, - pady1 + (fh - 1) // 2, - ] - return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- - -def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Upsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a multiple of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - up: Integer upsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the output. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). 
- flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - upx, upy = _parse_scaling(up) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw + upx - 1) // 2, - padx1 + (fw - upx) // 2, - pady0 + (fh + upy - 1) // 2, - pady1 + (fh - upy) // 2, - ] - return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl) - -#---------------------------------------------------------------------------- - -def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'): - r"""Downsample a batch of 2D images using the given 2D FIR filter. - - By default, the result is padded so that its shape is a fraction of the input. - User-specified padding is applied on top of that, with negative values - indicating cropping. Pixels outside the image are assumed to be zero. - - Args: - x: Float32/float64/float16 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - f: Float32 FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - down: Integer downsampling factor. Can be a single int or a list/tuple - `[x, y]` (default: 1). - padding: Padding with respect to the input. Can be a single number or a - list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - flip_filter: False = convolution, True = correlation (default: False). - gain: Overall scaling factor for signal magnitude (default: 1). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. 
- """ - downx, downy = _parse_scaling(down) - padx0, padx1, pady0, pady1 = _parse_padding(padding) - fw, fh = _get_filter_size(f) - p = [ - padx0 + (fw - downx + 1) // 2, - padx1 + (fw - downx) // 2, - pady0 + (fh - downy + 1) // 2, - pady1 + (fh - downy) // 2, - ] - return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl) - -#---------------------------------------------------------------------------- diff --git a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/detector/__init__.py b/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/detector/__init__.py deleted file mode 100644 index 4ccccd580d9470841c4529d797aa9b635271718e..0000000000000000000000000000000000000000 --- a/spaces/haotiz/glip-zeroshot-demo/maskrcnn_benchmark/modeling/detector/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -from .generalized_rcnn import GeneralizedRCNN -from .generalized_vl_rcnn import GeneralizedVLRCNN - -_DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN, - "GeneralizedVLRCNN": GeneralizedVLRCNN - } - - -def build_detection_model(cfg): - meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE] - return meta_arch(cfg) diff --git "a/spaces/hbestm/gpt-academic-play/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" "b/spaces/hbestm/gpt-academic-play/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" deleted file mode 100644 index 6a7d118b4439605db6e10b9a416a2e725b99a672..0000000000000000000000000000000000000000 --- "a/spaces/hbestm/gpt-academic-play/crazy_functions/\350\201\224\347\275\221\347\232\204ChatGPT.py" +++ /dev/null @@ -1,102 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping -import requests -from bs4 import BeautifulSoup -from request_llm.bridge_all import model_info - -def google(query, proxies): - query = query # 在此处替换您要搜索的关键词 - url = f"https://www.google.com/search?q={query}" - headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36'} - response = requests.get(url, headers=headers, proxies=proxies) - soup = BeautifulSoup(response.content, 'html.parser') - results = [] - for g in soup.find_all('div', class_='g'): - anchors = g.find_all('a') - if anchors: - link = anchors[0]['href'] - if link.startswith('/url?q='): - link = link[7:] - if not link.startswith('http'): - continue - title = g.find('h3').text - item = {'title': title, 'link': link} - results.append(item) - - for r in results: - print(r['link']) - return results - -def scrape_text(url, proxies) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.61 Safari/537.36', - 'Content-Type': 'text/plain', - } - try: - response = requests.get(url, headers=headers, proxies=proxies, timeout=8) - if response.encoding == "ISO-8859-1": response.encoding = response.apparent_encoding - except: - return "无法连接到该网页" - soup = BeautifulSoup(response.text, "html.parser") - for script in soup(["script", "style"]): - script.extract() - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - return text - 
-@CatchException -def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append((f"请结合互联网信息回答以下问题:{txt}", - "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该模板可以实现ChatGPT联网信息综合。该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第1步:爬取搜索引擎的结果 > ------------- - from toolbox import get_conf - proxies, = get_conf('proxies') - urls = google(txt, proxies) - history = [] - - # ------------- < 第2步:依次访问网页 > ------------- - max_search_result = 5 # 最多收纳多少个网页的结果 - for index, url in enumerate(urls[:max_search_result]): - res = scrape_text(url['link'], proxies) - history.extend([f"第{index}份搜索结果:", res]) - chatbot.append([f"第{index}份搜索结果:", res[:500]+"......"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - - # ------------- < 第3步:ChatGPT综合 > ------------- - i_say = f"从以上搜索结果中抽取信息,然后回答问题:{txt}" - i_say, history = input_clipping( # 裁剪输入,从最长的条目开始裁剪,防止爆token - inputs=i_say, - history=history, - max_token_limit=model_info[llm_kwargs['llm_model']]['max_token']*3//4 - ) - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=history, - sys_prompt="请从给定的若干条搜索结果中抽取信息,对最相关的两个搜索结果进行总结,然后回答问题。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 - diff --git a/spaces/huaiji3y/bingo-Public/cloudflare/worker.js b/spaces/huaiji3y/bingo-Public/cloudflare/worker.js deleted file mode 100644 index e0debd750615f1329b2c72fbce73e1b9291f7137..0000000000000000000000000000000000000000 --- a/spaces/huaiji3y/bingo-Public/cloudflare/worker.js +++ /dev/null @@ -1,18 +0,0 @@ -const TRAGET_HOST='hf4all-bingo.hf.space' // 请将此域名改成你自己的,域名信息在设置》站点域名查看。 - -export default { - async fetch(request) { - const uri = new URL(request.url); - if (uri.protocol === 'http:') { - uri.protocol = 'https:'; - return new Response('', { - status: 301, - headers: { - location: uri.toString(), - }, - }) - } - uri.host = TRAGET_HOST - return fetch(new Request(uri.toString(), request)); - }, -}; diff --git a/spaces/huggingface-projects/video-composer-gpt4/prompts.md b/spaces/huggingface-projects/video-composer-gpt4/prompts.md deleted file mode 100644 index 05a769bbd60e0cf1187fb6df50aba4bfa36bac11..0000000000000000000000000000000000000000 --- a/spaces/huggingface-projects/video-composer-gpt4/prompts.md +++ /dev/null @@ -1,33 +0,0 @@ -## Prompts - -- https://github.com/openai/openai-cookbook/blob/main/techniques_to_improve_reliability.md -- - -"I want you to act as a javascript console. I will type commands and you will reply with what the javascript console should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. do not write explanations. do not type commands unless I instruct you to do so. when i need to tell you something in english, i will do so by putting text inside curly brackets {like this}. my first command is console.log("Hello World");" - -"I want you to act as a javascript console. 
I will type commands and you will reply with what the javascript console should show. I want you to only reply with the terminal output inside one unique code block, and nothing else. do not write explanations. Do not type commands unless I instruct you to do so." - -You are an automated FFMPEG command generator. Use FFMPEG to create a new video that perform this operation {prompt} using those files: {info_string}. Respond with the simplest ffmpeg command and make sure it's valid as it will be pasted directly into the terminal. Try to avoid complex FFMPEG options and stay simple. Always name the new video "output.mp4". Never give any explanation, only the shell command. - -You are an automated FFMPEG command generator. Use FFMPEG to create a new video that perform this operation {prompt} using those files: {info_string}. Respond with the simplest ffmpeg command and make sure it's valid as it will be pasted directly into the terminal. Try to avoid complex FFMPEG options and stay simple. Always name the new video "output.mp4". Never give any explanation, only the shell command. You can use the following files: - -You are an automated FFMPEG command generator. You'll be using FFMPEG to compose a new video from a list of files and a user prompt. Lets's think step by step. Always name the new video "output.mp4". Never give any explanation, only the shell command. Do nothing and wait for the user prompt. Wrap the FFMPEG command in a code block. - ---- - -f"""You are an agent controlling a UNIX terminal. You are given: -(1) a set of video, audio and image assets. Including their name, duration, dimensions and file size -(2) the description of a new video you need to create from the list of assets - -Based on the available assets and the description, your objective issue a FFMPEG command you believe will work creating a new video. - -This will often involve putting assets one after the other, cropping the video format, or playing music in the background. Avoid using complex FFMPEG options, and try to keep the command as simple as possible. -Always name the output of the FFMPEG command "output.mp4". Always use the FFMPEG overwrite option (-y). Think step by step but never give any explanation, only the shell command. - -The current assets and objective follow. Reply with the FFMPEG command: - -AVAILABLE ASSETS: -OBJECTIVE: -YOUR FFMPEG COMMAND:""" - -"content": f"""You'll need to create a valid FFMPEG command that will be directly pasted in the terminal. You have those files (images, videos, and audio) at your disposal: {files_info} and you need to compose a new video using FFMPEG and following those instructions: "{prompt}". You'll need to use as many assets as you can. Make sure it's a valid command that will not do any error. Always name the output of the FFMPEG command "output.mp4". Always use the FFMPEG overwrite option (-y). Try to avoid using -filter_complex option. Don't produce video longer than 1 minute. 
Think step by step but never give any explanation, only the shell command.""", diff --git a/spaces/hwchase17/chat-your-data-state-of-the-union/cli_app.py b/spaces/hwchase17/chat-your-data-state-of-the-union/cli_app.py deleted file mode 100644 index 20fd8a7af75f42f506c8230d673d23b2eea39cb6..0000000000000000000000000000000000000000 --- a/spaces/hwchase17/chat-your-data-state-of-the-union/cli_app.py +++ /dev/null @@ -1,17 +0,0 @@ -import pickle -from query_data import get_chain - - -if __name__ == "__main__": - with open("vectorstore.pkl", "rb") as f: - vectorstore = pickle.load(f) - qa_chain = get_chain(vectorstore) - chat_history = [] - print("Chat with your docs!") - while True: - print("Human:") - question = input() - result = qa_chain({"question": question, "chat_history": chat_history}) - chat_history.append((question, result["answer"])) - print("AI:") - print(result["answer"]) diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/__init__.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/idsedykh/codebleu2/tests.py b/spaces/idsedykh/codebleu2/tests.py deleted file mode 100644 index 601ed757507caebec67493462d11eb4c8901c2a1..0000000000000000000000000000000000000000 --- a/spaces/idsedykh/codebleu2/tests.py +++ /dev/null @@ -1,17 +0,0 @@ -test_cases = [ - { - "predictions": [0, 0], - "references": [1, 1], - "result": {"metric_score": 0} - }, - { - "predictions": [1, 1], - "references": [1, 1], - "result": {"metric_score": 1} - }, - { - "predictions": [1, 0], - "references": [1, 1], - "result": {"metric_score": 0.5} - } -] \ No newline at end of file diff --git a/spaces/imseldrith/BotX/Uploader/README.md b/spaces/imseldrith/BotX/Uploader/README.md deleted file mode 100644 index f4ef5877455c8ea88b02bc0fdf8bb29de5f5f49c..0000000000000000000000000000000000000000 --- a/spaces/imseldrith/BotX/Uploader/README.md +++ /dev/null @@ -1,37 +0,0 @@ -

      FAQ

      -
      -

      How to edit config.py

      - -

      Here is a sample edited config.py for deploying locally or on a VPS. Copy, paste, and edit it with your variables; a minimal usage sketch follows the config block below.

      -

      Go to sample_config.py and edit it with your variables.

      - -
      -# sample config file 
      -
      -class Config(object):
      -
      -    # get a token from @BotFather
      -    BOT_TOKEN = "5568340867:AAGuPzlgwqgHtgqmdL7yt12PRLrXFjt98Zg"
      -    
      -    # Get these values from my.telegram.org
      -    API_ID = 12345
      -    API_HASH = "uPzlgwqgHtgqmdL7yt12PRLrXFj"
      -    
      -    # No need to change
      -    DOWNLOAD_LOCATION = "./DOWNLOADS"
      -    ADL_BOT_RQ = {}
      -    CHUNK_SIZE = 128
      -    TG_MAX_FILE_SIZE = 4194304000
      -    HTTP_PROXY = ""
      -    PROCESS_MAX_TIMEOUT = 3700
      -    
      -    # TG Ids
      -    LOG_CHANNEL = -1001798969594
      -    OWNER_ID = 1288398723
      -    
      -    # bot username without @
      -    BOT_USERNAME = "AdvanceUrlUploaderBot"
      -    
      -    # auth users
      -    AUTH_USERS = [OWNER_ID, 1288398722, 1288398724, 1288398725]
      -
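      A config like the one above is normally imported by the bot's entry point. The sketch below is illustrative only: it assumes the bot is built on Pyrogram (the framework is not named in this README) and that the file is saved as config.py; the handler shown is not the Space's actual code.

```python
# Minimal sketch of how the Config above could be consumed, assuming Pyrogram.
from pyrogram import Client, filters

from config import Config  # assumes the sample above is saved as config.py

app = Client(
    "uploader-bot",
    api_id=Config.API_ID,
    api_hash=Config.API_HASH,
    bot_token=Config.BOT_TOKEN,
)


@app.on_message(filters.command("start") & filters.private)
async def start(client, message):
    # Only respond to users listed in AUTH_USERS.
    if message.from_user.id not in Config.AUTH_USERS:
        await message.reply_text("You are not authorised to use this bot.")
        return
    await message.reply_text("Send me a direct download link to upload.")


if __name__ == "__main__":
    app.run()
```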
      diff --git a/spaces/inamXcontru/PoeticTTS/Agricantus discografia 1993-2005 (by algarock) How the Palermo-Based Group Revolutionized the World Music Scene.md b/spaces/inamXcontru/PoeticTTS/Agricantus discografia 1993-2005 (by algarock) How the Palermo-Based Group Revolutionized the World Music Scene.md deleted file mode 100644 index de241f3f2e90ffb66fc9bb0dc8786a48d4cf0528..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Agricantus discografia 1993-2005 (by algarock) How the Palermo-Based Group Revolutionized the World Music Scene.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Agricantus discografia 1993-2005 (by algarock)


      Download Zip ✏ ✏ ✏ https://gohhs.com/2uz5sM



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/inamXcontru/PoeticTTS/Avatar The Last Airbender 1080p Torrent.md b/spaces/inamXcontru/PoeticTTS/Avatar The Last Airbender 1080p Torrent.md deleted file mode 100644 index 6a5a95510f0eb58d210b8ecd4c2d72a152dbc6b9..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Avatar The Last Airbender 1080p Torrent.md +++ /dev/null @@ -1,10 +0,0 @@ -
      -

      Written by Gene Luen Yang (American Born Chinese) and illustrated by Gurihuru, Avatar: The Last Airbender: The Promise Part 1 offers fans of the Nickelodeon animated series the first chapter in a graphic novel series based on the groundbreaking series by DiMartino and Avatar: The Last Airbender creators Michael Dante DiMartino and Gene Luen Yang.

      -

      avatar the last airbender 1080p torrent


      DOWNLOAD ⚙⚙⚙ https://gohhs.com/2uz5Dx



      -

      For fans of Avatar: The Last Airbender, Nickelodeon, Gene Luen Yang, Michael Dante DiMartino, 'The Promise Part 1' graphic novel will be released on July 6, 2012. It is yet another part of the ongoing Dark Horse graphic novels that continue the story of Avatar, and is written by Gene Luen Yang.

      -

      Avatar: The Last Airbender continues a decade of storytelling and technical innovations to bring this series to audiences around the world and to continue its legacy in media and entertainment. The ultimate goal is to reach young readers and all readers with messages of hope, wisdom and the value of self-determination.

      -

      There is also an abundance of original Avatar comics and art, including the critically-acclaimed bi-weekly graphic anthology, Avatar: The Last Airbender Comic, which showcases new and classic stories written and illustrated by some of the most acclaimed artists in the business.

      -

      -

      Avatar: The Last Airbender is a story of triumph, loss, renewal, and adventure. From the gorgeous Japanese artwork of Gurihuru to the voice of Aang Sun-Kuei, and beyond, this is a delightful tale of a special boy learning the powerful concept of not only following your own true north, but finding your way home.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Buod Ng Satanas Sa Lupa Ang Pagbabago ng Pagkatao ng mga Tauhan sa Nobela ni Carunungan.md b/spaces/inamXcontru/PoeticTTS/Buod Ng Satanas Sa Lupa Ang Pagbabago ng Pagkatao ng mga Tauhan sa Nobela ni Carunungan.md deleted file mode 100644 index 13498562cf9ff90ddb8e7133d0b1e377f6a4a4c5..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Buod Ng Satanas Sa Lupa Ang Pagbabago ng Pagkatao ng mga Tauhan sa Nobela ni Carunungan.md +++ /dev/null @@ -1,39 +0,0 @@ - -

      Isaias 14:9 Ang Sheol mula sa ibaba ay nakikilos sa iyo upang salubungin ka sa iyong pagdating; nangapupukaw ang mga patay dahil sa iyo, sa makatuwid baga'y ang lahat na pinakapangulo sa lupa; nagsitindig mula sa kanilang mga luklukan ang lahat ng hari ng mga bansa.

      -

      Isaias 14:12 Ano't nahulog ka mula sa langit, isang nagliliwanag, anak ng bukang-liwayway!(Hebreo:hêlēl ben Shaḥar o hêlēl anak ni Shahar na diyos ng bukangliwayway sa Ugarit) paanong ikaw ay lumagpak sa lupa, ikaw na siyang nagpahina sa mga bansa!

      -

      Buod Ng Satanas Sa Lupa


      DOWNLOAD ⇒⇒⇒ https://gohhs.com/2uz5LA



      -

      Si Lucifer ay iniugnay kay Satanas bilang nagmataas at nahulog na anghel mula sa langit na kalaban ng Diyos na may nais na wasakin ang lahat ng mga nilikha ng Diyos. Tinatawag din si Satanas bilang ang "isang masama", ang "prinsipe ng mundong ito" (ng lupa o daigdig na kinaroroonan ng tao), at bilang "ang diyos ng kapanahunang ito". Siya ang nagdala ng kasamaan sa mundo at nilarawan bilang isang "sinungaling, mapangwasak, at mapanlusob" ng mga tao ng Diyos.[2]

      -

      11 Ang masamang nangyari kay Job ay nabalitaan ng tatlo niyang kaibigang si Elifaz na Temaneo, si Bildad na Suhita, at si Zofar na Naamita. Nagkasundo silang tatlo na dalawin si Job upang palakasin ang loob niya at makiramay sa kanya. 12 Malayo pa sila'y nakita na nila si Job ngunit hindi nila ito nakilala. Nang makilala nila ito, hindi nila napigilang umiyak nang malakas. Pinunit nila ang kanilang mga damit at naglagay ng abo sa ulo dahil sa pagdadalamhati. 13 Pitong araw at pitong gabi silang naupo sa lupa kasama ni Job. Ngunit hindi nila ito pinagsabihan ng kahit ano sapagkat nakikita nilang hirap na hirap ito sa kanyang kalagayan.

      -

      1:5 At mula kay Jesucristo na siyang saksing tapat, na panganay sa mga patay, at pangulo ng mga hari sa lupa. Doon sa umiibig sa atin, at sa nagkalag sa atin sa ating mga kasalanan sa pamamagitan ng kaniyang dugo;

      -

      1:7 Narito, siya'y pumaparitong nasasa mga alapaap; at makikita siya ng bawa't mata, at ng nangagsiulos sa kaniya; at ang lahat ng mga angkan sa lupa ay magsisitaghoy dahil sa kaniya. Gayon din, Siya nawa.

      -

      5:6 At nakita ko sa gitna ng luklukan at ng apat na nilalang na buhay, at sa gitna ng matatanda, ang isang Cordero na nakatayo, na wari ay pinatay, na may pitong sungay, at pitong mata, na siyang pitong Espiritu ng Dios, na sinugo sa buong lupa.

      -

      -

      5:13 At ang bawa't bagay na nilalang na nasa langit, at nasa ibabaw ng lupa, at nasa ilalim ng lupa, at nasa ibabaw ng dagat, at lahat ng mga bagay na nasa mga ito, ay narinig kong nangagsasabi, Sa kaniya na nakaupo sa luklukan, at sa Cordero ay ang pagpapala, at kapurihan, at kaluwalhatian, at paghahari, magpakailan kailan man.

      -

      6:4 At may lumabas na ibang kabayo, na kabayong mapula: at ang nakasakay dito, ay pinagkaloobang magalis sa lupa ng kapayapaan, at upang mangagpatayan ang isa't isa: at binigyan siya ng isang malaking tabak.

      -

      6:8 At tumingin ako, at narito, ang isang kabayong maputla: at ang nakasakay dito ay may pangalang Kamatayan; at ang Hades ay sumusunod sa kaniya. At sila'y pinagkalooban ng kapamahalaan sa ikaapat na bahagi ng lupa, na pumatay sa pamamagitan ng tabak, at ng gutom, at ng salot, at ng mga ganid na hayop sa lupa.

      -

      6:15 At ang mga hari sa lupa, at ang mga prinsipe, at ang mga pangulong kapitan, at ang mayayaman, at ang mga makapangyarihan, at ang bawa't alipin at ang bawa't laya, ay nagsipagtago sa mga yungib at sa mga bato sa mga bundok;

      -

      7:1 At pagkatapos nito ay nakita ko ang apat na anghel na nakatayo sa apat na sulok ng lupa, na pinipigil ang apat na hangin ng lupa, upang huwag humihip ang hangin sa lupa, o sa dagat man, o sa anomang punong kahoy.

      -

      7:2 At nakita ko ang ibang anghel na umaakyat mula sa sikatan ng araw, na taglay ang tatak ng Dios na buhay: at siya'y sumigaw ng tinig na malakas sa apat na anghel na pinagkaloobang maipahamak ang lupa at ang dagat,

      -

      8:7 At humihip ang una, at nagkaroon ng granizo at apoy, na may halong dugo, at itinapon sa lupa: at ang ikatlong bahagi ng lupa ay nasunog, at ang ikatlong bahagi ng mga punong kahoy ay nasunog, at ang lahat ng sariwang damo ay nasunog.

      -

      8:13 At nakita ko, at narinig ko ang isang anghel, na lumilipad sa pagitan ng langit, na nagsasabi ng malakas na tinig, Sa aba, sa aba, sa aba ng mga nananahan sa ibabaw ng lupa, dahil sa mga ibang tunog ng pakakak ng tatlong anghel, na magsisihihip pa.

      -

      10:6 At ipinanumpa yaong nabubuhay magpakailan kailan man, na lumalang ng langit at ng mga bagay na naroroon, at ng lupa at ng mga bagay na naririto, at ng dagat at ng mga bagay na naririto, na hindi na magluluwat ang panahon:

      -

      10:8 At ang tinig na aking narinig na mula sa langit, ay muling nagsalita sa akin, at nagsabi, Humayo ka, kunin mo ang aklat na bukas na nasa kamay ng anghel na nakatayo sa ibabaw ng dagat at sa ibabaw ng lupa.

      -

      11:6 Ang mga ito'y may kapangyarihang magsara ng langit, upang huwag umulan sa loob ng mga araw ng kanilang hula: at may kapangyarihan sila sa mga tubig na mapaging dugo, at mapahirapan ang lupa ng bawa't salot sa tuwing kanilang nasain.

      -

      11:10 At ang mga nananahan sa ibabaw ng lupa ay mangagagalak tungkol sa kanila, at mangatutuwa; at sila'y mangagpapadalahan ng mga kaloob; sapagka't ang dalawang propetang ito ay nagpahirap sa nangananahan sa ibabaw ng lupa.

      -

      11:18 At nangagalit ang mga bansa, at dumating ang iyong poot, at ang panahon ng mga patay upang mangahatulan, at ang panahon ng pagbibigay mo ng ganting-pala sa iyong mga alipin na mga propeta, at sa mga banal, at sa mga natatakot sa iyong pangalan, maliliit at malalaki; at upang ipahamak mo ang mga nagpapahamak ng lupa.

      -

      12:4 At kinaladkad ng kaniyang buntot ang ikatlong bahagi ng mga bituin sa langit, at ipinaghagis sa lupa: at lumagay ang dragon sa harapan ng babaing manganganak na, upang lamunin ang kaniyang anak pagkapanganak niya.

      -

      12:9 At inihagis ang malaking dragon, ang matandang ahas, ang tinatawag na Diablo at Satanas, ang dumadaya sa buong sanglibutan; siya'y inihagis sa lupa, at ang kaniyang mga anghel ay inihagis na kasama niya.

      -

      12:12 Kaya't mangagalak kayo, Oh mga langit at kayong nagsisitahan diyan. Sa aba ng lupa at ng dagat: sapagka't ang diablo'y bumaba sa inyo, na may malaking galit, sa pagkaalam niya na kaunting panahon na lamang mayroon siya.

      -

      13:14 At nadadaya niya ang mga nananahan sa lupa dahil sa mga tanda na sa kaniya'y ipinagkaloob na magawa sa paningin ng hayop; na sinasabi sa mga nananahan sa lupa, na dapat silang gumawa ng isang larawan ng hayop na mayroon ng sugat ng tabak at nabuhay.

      -

      14:3 At sila'y nangagaawitan na wari'y isang bagong awit sa harapan ng luklukan, at sa harap ng apat na nilalang na buhay at ng matatanda: at sinoman ay hindi maaaring matuto ng awit kundi ang isang daan at apat na pu't apat na libo lamang, sa makatuwid ay siyang mga binili mula sa lupa.

      -

      14:7 At sinasabi niya ng malakas na tinig, Matakot kayo sa Dios, at magbigay kaluwalhatian sa kaniya; sapagka't dumating ang panahon ng kaniyang paghatol: at magsisamba kayo sa gumawa ng langit at ng lupa at ng dagat at ng mga bukal ng tubig.

      -

      14:15 At lumabas ang ibang anghel sa templo, na sumisigaw ng malakas na tinig doon sa nakaupo sa alapaap, Ihulog mo ang iyong panggapas, at gumapas ka; sapagka't dumating ang oras ng paggapas, sapagka't ang aanihin sa lupa ay hinog na.

      -

      14:18 At ang ibang anghel ay lumabas sa dambana, na siyang may kapangyarihan sa apoy, at tinawagan ng malakas na tinig yaong may panggapas na matalas, na sinasabi, Ihulog mo ang iyong panggapas na matalas, at putihin mo ang mga buwig sa ubasan sa lupa; sapagka't ang kaniyang mga ubas ay mga hinog na.

      -

      16:18 At nagkaroon ng mga kidlat, at mga tinig, at mga kulog; at nagkaroon ng malakas na lindol, na di nangyari kailan man mula nang magkatao sa lupa, isang lindol na lubhang malakas, lubhang kakilakilabot.

      -

      17:8 At ang hayop na nakita mo ay naging siya, at wala na; at malapit ng umahon sa kalaliman, at patungo sa kapahamakan. At silang mga nananahan sa lupa ay manggigilalas na ang kanilang pangalan ay hindi nakasulat sa aklat ng buhay mula nang itatag ang sanglibutan, pagkakita nila sa hayop, kung paano naging siya at wala na, at darating.

      -

      18:3 Sapagka't dahil sa alak ng galit ng kaniyang pakikiapid ay nangaguho ang lahat ng mga bansa; at ang mga hari sa lupa ay nangakiapid sa kaniya, at ang mga mangangalakal sa lupa ay nagsiyaman dahil sa kapangyarihan ng kaniyang kalayawan.

      -

      18:23 At ang ilaw ng ilawan ay hindi na liliwanag pa sa iyo, at ang tinig ng kasintahang lalake at ng kasintahang babae ay hindi na maririnig pa sa iyo; sapagka't ang mga mangangalakal mo ay naging mga pangulo sa lupa; sapagka't dinaya ng iyong panggagaway ang lahat ng mga bansa.

      -

      19:2 Sapagka't tunay at matuwid ang kaniyang mga paghatol; sapagka't hinatulan niya ang bantog na patutot, na siyang nagpasama sa lupa ng kaniyang pakikiapid, at iginanti niya ang dugo ng kaniyang mga alipin sa pamamagitan ng kaniyang kamay.

      -

      20. Matapos maisagawa ang Kanyang anim-na-libong-taon ng gawain hanggang sa araw na ito, naipakita na ng Diyos ang marami sa Kanyang mga kilos, na ang layunin una sa lahat ay ang matalo si Satanas at maghatid ng kaligtasan sa buong sangkatauhan. Ginagamit Niya ang pagkakataong ito upang tulutan ang lahat sa langit, lahat sa lupa, lahat ng sakop ng karagatan, at lahat ng huling bagay na nilikha ng Diyos sa lupa na makita ang Kanyang pagiging makapangyarihan sa lahat at masaksihan ang lahat ng Kanyang kilos. Sinusunggaban Niya ang pagkakataong ibinibigay ng pagtalo Niya kay Satanas upang ipakita ang lahat ng Kanyang gawa sa mga tao, at magawa nilang purihin Siya at dakilain ang Kanyang karunungan sa pagtalo kay Satanas. Lahat sa lupa, sa langit, at sa ilalim ng karagatan ay naghahatid ng kaluwalhatian sa Diyos, pinupuri ang Kanyang pagiging makapangyarihan sa lahat, pinupuri ang bawat isa sa Kanyang mga gawa, at ipinagsisigawan ang Kanyang banal na pangalan. Ito ay patunay ng Kanyang pagtalo kay Satanas; patunay ito ng Kanyang paglupig kay Satanas. Ang mas mahalaga, patunay ito ng Kanyang pagliligtas sa sangkatauhan. Ang buong paglikha ng Diyos ay naghahatid sa Kanya ng kaluwalhatian, pinupuri Siya sa pagtalo sa Kanyang kaaway at pagbalik na matagumpay, at itinatanyag Siya bilang dakila at matagumpay na Hari. Ang Kanyang layunin ay hindi lamang para talunin si Satanas, kaya ang Kanyang gawain ay nagpatuloy nang anim na libong taon. Ginagamit Niya ang pagkatalo ni Satanas upang iligtas ang sangkatauhan; ginagamit Niya ang pagkatalo ni Satanas upang ipakita ang lahat ng Kanyang kilos at Kanyang buong kaluwalhatian. Siya ay magtatamo ng kaluwalhatian, at lahat ng pulutong ng mga anghel ay makikita ang Kanyang kaluwalhatian. Ang mga sugo sa langit, mga tao sa lupa, at lahat ng bagay na nilikha sa lupa ay makikita ang kaluwalhatian ng Lumikha. Ito ang gawaing Kanyang ginagawa. Ang Kanyang nilikha sa langit at sa lupa ay masasaksihang lahat ang Kanyang kaluwalhatian, at babalik Siya nang matagumpay matapos Niyang lubos na talunin si Satanas, at tutulutan ang sangkatauhan na purihin Siya, sa gayon ay magkakamit ng dobleng tagumpay sa Kanyang gawain. Sa huli, buong sangkatauhan ay lulupigin Niya, at Kanyang lilipulin ang sinumang lalaban o susuway; sa madaling salita, lilipulin Niya ang lahat ng nabibilang kay Satanas.

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/CDMA.Workshop.v3.9.0.cracked.rar Full _VERIFIED_.md b/spaces/inplisQlawa/anything-midjourney-v4-1/CDMA.Workshop.v3.9.0.cracked.rar Full _VERIFIED_.md deleted file mode 100644 index 768f54845c9ce2b58673bef90bd3fd5ba7f456fb..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/CDMA.Workshop.v3.9.0.cracked.rar Full _VERIFIED_.md +++ /dev/null @@ -1,6 +0,0 @@ -

      CDMA.Workshop.v3.9.0.cracked.rar Full


      Download 🗸 https://urlin.us/2uEy18



      -
      - d5da3c52bf
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HACK 4K Video Downloader V6.1.1.2075 Setup Crack Portable.md b/spaces/inplisQlawa/anything-midjourney-v4-1/HACK 4K Video Downloader V6.1.1.2075 Setup Crack Portable.md deleted file mode 100644 index 48a4b6ab3d68c7fa0fb1e933c67b934121a9de95..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/HACK 4K Video Downloader V6.1.1.2075 Setup Crack Portable.md +++ /dev/null @@ -1,6 +0,0 @@ -

      HACK 4K Video Downloader v6.1.1.2075 Setup Crack Portable


      Download ::: https://urlin.us/2uExsD



      - -Spotify Music v17.10.49.638 Mod Apk & Pc · Movavi Video Editor ... 4K Video Downloader v6.1.1.2075 Setup + Crack + Portable · MICROSOFT ... 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Age Of Empires 2 Hd The Forgotten Patch 3.6 Download ((HOT)).md b/spaces/inreVtussa/clothingai/Examples/Age Of Empires 2 Hd The Forgotten Patch 3.6 Download ((HOT)).md deleted file mode 100644 index d12f866783925c65f6a0300d9a98e759709c98bc..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Age Of Empires 2 Hd The Forgotten Patch 3.6 Download ((HOT)).md +++ /dev/null @@ -1,22 +0,0 @@ -

      Age Of Empires 2 Hd The Forgotten Patch 3.6 Download


      DOWNLOADhttps://tiurll.com/2uCjin



      - -Patch 3.6 Update History - -Official Sources - -References - -Category:Age of Empires IIThe proposed work will be focused in particular on the joint effects of RNA chaperone activity and codon usage. Different systems will be explored, including a minimal translation system, a natural translational system of the chloroplast of the unicellular green alga Chlorella, and a bacterial system in which the ribosome complex is made without the usual proteins. The Chlorella system is expected to provide the simplest and most robust of the systems. The Chlorella system should allow the most accurate analyses of the effects of RNA chaperones on the accuracy of translation. The system may also be advantageous for studying a more general problem, namely the roles of RNA chaperones in the expression of heterologous genes. The last system, which is the one which will be explored in more detail, is of particular interest, as it shows effects of RNA chaperones that are distinct from those seen in the Chlorella system. The bacterial system is particularly well suited for detailed analyses of the RNA chaperone/codon usage interactions. More generally, the results of this work will help to elucidate the roles of RNA chaperones in the expression of heterologous genes, and will hopefully provide important insights into the nature of translational errors.Your complete guide to Dublin's Best FREE things to do - -Dublin's Free Tours - -Are you planning to visit Dublin for the first time? If yes, then it is the right time to start exploring the vibrant capital city of Ireland, where you can experience the authentic Irish culture. It is widely known for its historic sites, cultural attractions, art galleries and its rich heritage. If you are a history enthusiast, then it is highly recommended to plan a day tour to Dublin and explore the UNESCO World Heritage Sites on foot. - -Dublin is filled with all kinds of free guided tours, cultural tours, free walks, and free day tours. The best thing about these tours is that you can explore them by yourself without any guide or crowd. Check out the list of free walking tours of Dublin below: - -Dublin Great Walks - -Dublin Great Walks is an initiative of Dublin City Council to make Dublin more accessible for tourists. It is a great way to explore Dublin’s history and culture on foot. Each tour starts at 10 a.m. on weekdays and at 10 a.m. and 3 4fefd39f24
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Ahmet Maranki Kitab Pdf.md b/spaces/inreVtussa/clothingai/Examples/Ahmet Maranki Kitab Pdf.md deleted file mode 100644 index 4ed2323a6a34b74c82224314dbead7694dd71d22..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Ahmet Maranki Kitab Pdf.md +++ /dev/null @@ -1,6 +0,0 @@ -

      ahmet maranki kitab pdf


      Download File ->->->-> https://tiurll.com/2uClBV



      - -Ahmet Maranki Korona'nın şifresini verdi ortalık karıştı! ... Simply re-download and click the laptop on the setup slide of the pdf to access the Google Slides Version. ... eBook) [EP201800108] - You can pay and download this book instantly. 1fdad05405
      -
      -
      -

      diff --git a/spaces/inreVtussa/clothingai/Examples/Arcgis10fullversionfreedownload.md b/spaces/inreVtussa/clothingai/Examples/Arcgis10fullversionfreedownload.md deleted file mode 100644 index de58ed443f8a03f126905f6256e1ebb19bc8ff26..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Arcgis10fullversionfreedownload.md +++ /dev/null @@ -1,6 +0,0 @@ -

      arcgis10fullversionfreedownload


      Download Filehttps://tiurll.com/2uClYr



      -
      -call of duty modern warfare 3 psp iso download · Solvermedia 123 Tpv Net 2013 Crack · Refx Nexus 232 Team Air TORRENT · arcgis10fullversionfreedownload. 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/jackli888/stable-diffusion-webui/modules/textual_inversion/image_embedding.py b/spaces/jackli888/stable-diffusion-webui/modules/textual_inversion/image_embedding.py deleted file mode 100644 index 660ca62038594937f5fc3b29f50546a75a4af588..0000000000000000000000000000000000000000 --- a/spaces/jackli888/stable-diffusion-webui/modules/textual_inversion/image_embedding.py +++ /dev/null @@ -1,220 +0,0 @@ -import base64 -import json -import numpy as np -import zlib -from PIL import Image, PngImagePlugin, ImageDraw, ImageFont -from fonts.ttf import Roboto -import torch -from modules.shared import opts - - -class EmbeddingEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, torch.Tensor): - return {'TORCHTENSOR': obj.cpu().detach().numpy().tolist()} - return json.JSONEncoder.default(self, obj) - - -class EmbeddingDecoder(json.JSONDecoder): - def __init__(self, *args, **kwargs): - json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs) - - def object_hook(self, d): - if 'TORCHTENSOR' in d: - return torch.from_numpy(np.array(d['TORCHTENSOR'])) - return d - - -def embedding_to_b64(data): - d = json.dumps(data, cls=EmbeddingEncoder) - return base64.b64encode(d.encode()) - - -def embedding_from_b64(data): - d = base64.b64decode(data) - return json.loads(d, cls=EmbeddingDecoder) - - -def lcg(m=2**32, a=1664525, c=1013904223, seed=0): - while True: - seed = (a * seed + c) % m - yield seed % 255 - - -def xor_block(block): - g = lcg() - randblock = np.array([next(g) for _ in range(np.product(block.shape))]).astype(np.uint8).reshape(block.shape) - return np.bitwise_xor(block.astype(np.uint8), randblock & 0x0F) - - -def style_block(block, sequence): - im = Image.new('RGB', (block.shape[1], block.shape[0])) - draw = ImageDraw.Draw(im) - i = 0 - for x in range(-6, im.size[0], 8): - for yi, y in enumerate(range(-6, im.size[1], 8)): - offset = 0 - if yi % 2 == 0: - offset = 4 - shade = sequence[i % len(sequence)] - i += 1 - draw.ellipse((x+offset, y, x+6+offset, y+6), fill=(shade, shade, shade)) - - fg = np.array(im).astype(np.uint8) & 0xF0 - - return block ^ fg - - -def insert_image_data_embed(image, data): - d = 3 - data_compressed = zlib.compress(json.dumps(data, cls=EmbeddingEncoder).encode(), level=9) - data_np_ = np.frombuffer(data_compressed, np.uint8).copy() - data_np_high = data_np_ >> 4 - data_np_low = data_np_ & 0x0F - - h = image.size[1] - next_size = data_np_low.shape[0] + (h-(data_np_low.shape[0] % h)) - next_size = next_size + ((h*d)-(next_size % (h*d))) - - data_np_low = np.resize(data_np_low, next_size) - data_np_low = data_np_low.reshape((h, -1, d)) - - data_np_high = np.resize(data_np_high, next_size) - data_np_high = data_np_high.reshape((h, -1, d)) - - edge_style = list(data['string_to_param'].values())[0].cpu().detach().numpy().tolist()[0][:1024] - edge_style = (np.abs(edge_style)/np.max(np.abs(edge_style))*255).astype(np.uint8) - - data_np_low = style_block(data_np_low, sequence=edge_style) - data_np_low = xor_block(data_np_low) - data_np_high = style_block(data_np_high, sequence=edge_style[::-1]) - data_np_high = xor_block(data_np_high) - - im_low = Image.fromarray(data_np_low, mode='RGB') - im_high = Image.fromarray(data_np_high, mode='RGB') - - background = Image.new('RGB', (image.size[0]+im_low.size[0]+im_high.size[0]+2, image.size[1]), (0, 0, 0)) - background.paste(im_low, (0, 0)) - background.paste(image, (im_low.size[0]+1, 0)) - background.paste(im_high, (im_low.size[0]+1+image.size[0]+1, 0)) - - return background 
- - -def crop_black(img, tol=0): - mask = (img > tol).all(2) - mask0, mask1 = mask.any(0), mask.any(1) - col_start, col_end = mask0.argmax(), mask.shape[1]-mask0[::-1].argmax() - row_start, row_end = mask1.argmax(), mask.shape[0]-mask1[::-1].argmax() - return img[row_start:row_end, col_start:col_end] - - -def extract_image_data_embed(image): - d = 3 - outarr = crop_black(np.array(image.convert('RGB').getdata()).reshape(image.size[1], image.size[0], d).astype(np.uint8)) & 0x0F - black_cols = np.where(np.sum(outarr, axis=(0, 2)) == 0) - if black_cols[0].shape[0] < 2: - print('No Image data blocks found.') - return None - - data_block_lower = outarr[:, :black_cols[0].min(), :].astype(np.uint8) - data_block_upper = outarr[:, black_cols[0].max()+1:, :].astype(np.uint8) - - data_block_lower = xor_block(data_block_lower) - data_block_upper = xor_block(data_block_upper) - - data_block = (data_block_upper << 4) | (data_block_lower) - data_block = data_block.flatten().tobytes() - - data = zlib.decompress(data_block) - return json.loads(data, cls=EmbeddingDecoder) - - -def caption_image_overlay(srcimage, title, footerLeft, footerMid, footerRight, textfont=None): - from math import cos - - image = srcimage.copy() - fontsize = 32 - if textfont is None: - try: - textfont = ImageFont.truetype(opts.font or Roboto, fontsize) - textfont = opts.font or Roboto - except Exception: - textfont = Roboto - - factor = 1.5 - gradient = Image.new('RGBA', (1, image.size[1]), color=(0, 0, 0, 0)) - for y in range(image.size[1]): - mag = 1-cos(y/image.size[1]*factor) - mag = max(mag, 1-cos((image.size[1]-y)/image.size[1]*factor*1.1)) - gradient.putpixel((0, y), (0, 0, 0, int(mag*255))) - image = Image.alpha_composite(image.convert('RGBA'), gradient.resize(image.size)) - - draw = ImageDraw.Draw(image) - - font = ImageFont.truetype(textfont, fontsize) - padding = 10 - - _, _, w, h = draw.textbbox((0, 0), title, font=font) - fontsize = min(int(fontsize * (((image.size[0]*0.75)-(padding*4))/w)), 72) - font = ImageFont.truetype(textfont, fontsize) - _, _, w, h = draw.textbbox((0, 0), title, font=font) - draw.text((padding, padding), title, anchor='lt', font=font, fill=(255, 255, 255, 230)) - - _, _, w, h = draw.textbbox((0, 0), footerLeft, font=font) - fontsize_left = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72) - _, _, w, h = draw.textbbox((0, 0), footerMid, font=font) - fontsize_mid = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72) - _, _, w, h = draw.textbbox((0, 0), footerRight, font=font) - fontsize_right = min(int(fontsize * (((image.size[0]/3)-(padding))/w)), 72) - - font = ImageFont.truetype(textfont, min(fontsize_left, fontsize_mid, fontsize_right)) - - draw.text((padding, image.size[1]-padding), footerLeft, anchor='ls', font=font, fill=(255, 255, 255, 230)) - draw.text((image.size[0]/2, image.size[1]-padding), footerMid, anchor='ms', font=font, fill=(255, 255, 255, 230)) - draw.text((image.size[0]-padding, image.size[1]-padding), footerRight, anchor='rs', font=font, fill=(255, 255, 255, 230)) - - return image - - -if __name__ == '__main__': - - testEmbed = Image.open('test_embedding.png') - data = extract_image_data_embed(testEmbed) - assert data is not None - - data = embedding_from_b64(testEmbed.text['sd-ti-embedding']) - assert data is not None - - image = Image.new('RGBA', (512, 512), (255, 255, 200, 255)) - cap_image = caption_image_overlay(image, 'title', 'footerLeft', 'footerMid', 'footerRight') - - test_embed = {'string_to_param': {'*': torch.from_numpy(np.random.random((2, 4096)))}} 
- - embedded_image = insert_image_data_embed(cap_image, test_embed) - - retrived_embed = extract_image_data_embed(embedded_image) - - assert str(retrived_embed) == str(test_embed) - - embedded_image2 = insert_image_data_embed(cap_image, retrived_embed) - - assert embedded_image == embedded_image2 - - g = lcg() - shared_random = np.array([next(g) for _ in range(100)]).astype(np.uint8).tolist() - - reference_random = [253, 242, 127, 44, 157, 27, 239, 133, 38, 79, 167, 4, 177, - 95, 130, 79, 78, 14, 52, 215, 220, 194, 126, 28, 240, 179, - 160, 153, 149, 50, 105, 14, 21, 218, 199, 18, 54, 198, 193, - 38, 128, 19, 53, 195, 124, 75, 205, 12, 6, 145, 0, 28, - 30, 148, 8, 45, 218, 171, 55, 249, 97, 166, 12, 35, 0, - 41, 221, 122, 215, 170, 31, 113, 186, 97, 119, 31, 23, 185, - 66, 140, 30, 41, 37, 63, 137, 109, 216, 55, 159, 145, 82, - 204, 86, 73, 222, 44, 198, 118, 240, 97] - - assert shared_random == reference_random - - hunna_kay_random_sum = sum(np.array([next(g) for _ in range(100000)]).astype(np.uint8).tolist()) - - assert 12731374 == hunna_kay_random_sum diff --git a/spaces/jjumper/Jump/greeting.md b/spaces/jjumper/Jump/greeting.md deleted file mode 100644 index 98dc8102f14b9b3857bc8278aec7b2db68a142c7..0000000000000000000000000000000000000000 --- a/spaces/jjumper/Jump/greeting.md +++ /dev/null @@ -1,2 +0,0 @@ -Donate keys here: -jjumpery2k@proton.me \ No newline at end of file diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Signature/test_dss.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Signature/test_dss.py deleted file mode 100644 index d3f8dfce2ded2a17f49b96a879c42207489f798a..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/SelfTest/Signature/test_dss.py +++ /dev/null @@ -1,1369 +0,0 @@ -# -# SelfTest/Signature/test_dss.py: Self-test for DSS signatures -# -# =================================================================== -# -# Copyright (c) 2014, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
-# =================================================================== - -import re -import unittest -from binascii import hexlify, unhexlify - -from Crypto.Util.py3compat import tobytes, bord, bchr - -from Crypto.Hash import (SHA1, SHA224, SHA256, SHA384, SHA512, - SHA3_224, SHA3_256, SHA3_384, SHA3_512) -from Crypto.Signature import DSS -from Crypto.PublicKey import DSA, ECC -from Crypto.SelfTest.st_common import list_test_cases -from Crypto.SelfTest.loader import load_test_vectors, load_test_vectors_wycheproof -from Crypto.Util.number import bytes_to_long, long_to_bytes - - -def t2b(hexstring): - ws = hexstring.replace(" ", "").replace("\n", "") - return unhexlify(tobytes(ws)) - - -def t2l(hexstring): - ws = hexstring.replace(" ", "").replace("\n", "") - return int(ws, 16) - - -def load_hash_by_name(hash_name): - return __import__("Crypto.Hash." + hash_name, globals(), locals(), ["new"]) - - -class StrRNG: - - def __init__(self, randomness): - length = len(randomness) - self._idx = 0 - # Fix required to get the right K (see how randint() works!) - self._randomness = long_to_bytes(bytes_to_long(randomness) - 1, length) - - def __call__(self, n): - out = self._randomness[self._idx:self._idx + n] - self._idx += n - return out - - -class FIPS_DSA_Tests(unittest.TestCase): - - # 1st 1024 bit key from SigGen.txt - P = 0xa8f9cd201e5e35d892f85f80e4db2599a5676a3b1d4f190330ed3256b26d0e80a0e49a8fffaaad2a24f472d2573241d4d6d6c7480c80b4c67bb4479c15ada7ea8424d2502fa01472e760241713dab025ae1b02e1703a1435f62ddf4ee4c1b664066eb22f2e3bf28bb70a2a76e4fd5ebe2d1229681b5b06439ac9c7e9d8bde283 - Q = 0xf85f0f83ac4df7ea0cdf8f469bfeeaea14156495 - G = 0x2b3152ff6c62f14622b8f48e59f8af46883b38e79b8c74deeae9df131f8b856e3ad6c8455dab87cc0da8ac973417ce4f7878557d6cdf40b35b4a0ca3eb310c6a95d68ce284ad4e25ea28591611ee08b8444bd64b25f3f7c572410ddfb39cc728b9c936f85f419129869929cdb909a6a3a99bbe089216368171bd0ba81de4fe33 - X = 0xc53eae6d45323164c7d07af5715703744a63fc3a - Y = 0x313fd9ebca91574e1c2eebe1517c57e0c21b0209872140c5328761bbb2450b33f1b18b409ce9ab7c4cd8fda3391e8e34868357c199e16a6b2eba06d6749def791d79e95d3a4d09b24c392ad89dbf100995ae19c01062056bb14bce005e8731efde175f95b975089bdcdaea562b32786d96f5a31aedf75364008ad4fffebb970b - - key_pub = DSA.construct((Y, G, P, Q)) - key_priv = DSA.construct((Y, G, P, Q, X)) - - def shortDescription(self): - return "FIPS DSA Tests" - - def test_loopback(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv, 'fips-186-3') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub, 'fips-186-3') - verifier.verify(hashed_msg, signature) - - def test_negative_unapproved_hashes(self): - """Verify that unapproved hashes are rejected""" - - from Crypto.Hash import RIPEMD160 - - self.description = "Unapproved hash (RIPEMD160) test" - hash_obj = RIPEMD160.new() - signer = DSS.new(self.key_priv, 'fips-186-3') - self.assertRaises(ValueError, signer.sign, hash_obj) - self.assertRaises(ValueError, signer.verify, hash_obj, b"\x00" * 40) - - def test_negative_unknown_modes_encodings(self): - """Verify that unknown modes/encodings are rejected""" - - self.description = "Unknown mode test" - self.assertRaises(ValueError, DSS.new, self.key_priv, 'fips-186-0') - - self.description = "Unknown encoding test" - self.assertRaises(ValueError, DSS.new, self.key_priv, 'fips-186-3', 'xml') - - def test_asn1_encoding(self): - """Verify ASN.1 encoding""" - - self.description = "ASN.1 encoding test" - hash_obj = SHA1.new() - signer = DSS.new(self.key_priv, 'fips-186-3', 'der') - signature 
= signer.sign(hash_obj) - - # Verify that output looks like a DER SEQUENCE - self.assertEqual(bord(signature[0]), 48) - signer.verify(hash_obj, signature) - - # Verify that ASN.1 parsing fails as expected - signature = bchr(7) + signature[1:] - self.assertRaises(ValueError, signer.verify, hash_obj, signature) - - def test_sign_verify(self): - """Verify public/private method""" - - self.description = "can_sign() test" - signer = DSS.new(self.key_priv, 'fips-186-3') - self.assertTrue(signer.can_sign()) - - signer = DSS.new(self.key_pub, 'fips-186-3') - self.assertFalse(signer.can_sign()) - - try: - signer.sign(SHA256.new(b'xyz')) - except TypeError as e: - msg = str(e) - else: - msg = "" - self.assertTrue("Private key is needed" in msg) - - -class FIPS_DSA_Tests_KAT(unittest.TestCase): - pass - - -test_vectors_verify = load_test_vectors(("Signature", "DSA"), - "FIPS_186_3_SigVer.rsp", - "Signature Verification 186-3", - {'result': lambda x: x}) or [] - -for idx, tv in enumerate(test_vectors_verify): - - if isinstance(tv, str): - res = re.match(r"\[mod = L=([0-9]+), N=([0-9]+), ([a-zA-Z0-9-]+)\]", tv) - assert(res) - hash_name = res.group(3).replace("-", "") - hash_module = load_hash_by_name(hash_name) - continue - - if hasattr(tv, "p"): - modulus = tv.p - generator = tv.g - suborder = tv.q - continue - - hash_obj = hash_module.new(tv.msg) - - comps = [bytes_to_long(x) for x in (tv.y, generator, modulus, suborder)] - key = DSA.construct(comps, False) # type: ignore - verifier = DSS.new(key, 'fips-186-3') - - def positive_test(self, verifier=verifier, hash_obj=hash_obj, signature=tv.r+tv.s): - verifier.verify(hash_obj, signature) - - def negative_test(self, verifier=verifier, hash_obj=hash_obj, signature=tv.r+tv.s): - self.assertRaises(ValueError, verifier.verify, hash_obj, signature) - - if tv.result == 'p': - setattr(FIPS_DSA_Tests_KAT, "test_verify_positive_%d" % idx, positive_test) - else: - setattr(FIPS_DSA_Tests_KAT, "test_verify_negative_%d" % idx, negative_test) - - -test_vectors_sign = load_test_vectors(("Signature", "DSA"), - "FIPS_186_3_SigGen.txt", - "Signature Creation 186-3", - {}) or [] - -for idx, tv in enumerate(test_vectors_sign): - - if isinstance(tv, str): - res = re.match(r"\[mod = L=([0-9]+), N=([0-9]+), ([a-zA-Z0-9-]+)\]", tv) - assert(res) - hash_name = res.group(3).replace("-", "") - hash_module = load_hash_by_name(hash_name) - continue - - if hasattr(tv, "p"): - modulus = tv.p - generator = tv.g - suborder = tv.q - continue - - hash_obj = hash_module.new(tv.msg) - comps_dsa = [bytes_to_long(x) for x in (tv.y, generator, modulus, suborder, tv.x)] - key = DSA.construct(comps_dsa, False) # type: ignore - signer = DSS.new(key, 'fips-186-3', randfunc=StrRNG(tv.k)) - - def new_test(self, signer=signer, hash_obj=hash_obj, signature=tv.r+tv.s): - self.assertEqual(signer.sign(hash_obj), signature) - setattr(FIPS_DSA_Tests_KAT, "test_sign_%d" % idx, new_test) - - -class FIPS_ECDSA_Tests(unittest.TestCase): - - key_priv = ECC.generate(curve="P-256") - key_pub = key_priv.public_key() - - def shortDescription(self): - return "FIPS ECDSA Tests" - - def test_loopback(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv, 'fips-186-3') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub, 'fips-186-3') - verifier.verify(hashed_msg, signature) - - def test_negative_unapproved_hashes(self): - """Verify that unapproved hashes are rejected""" - - from Crypto.Hash import SHA1 - - self.description = "Unapproved hash (SHA-1) test" - hash_obj = 
SHA1.new() - signer = DSS.new(self.key_priv, 'fips-186-3') - self.assertRaises(ValueError, signer.sign, hash_obj) - self.assertRaises(ValueError, signer.verify, hash_obj, b"\x00" * 40) - - def test_negative_eddsa_key(self): - key = ECC.generate(curve="ed25519") - self.assertRaises(ValueError, DSS.new, key, 'fips-186-3') - - def test_sign_verify(self): - """Verify public/private method""" - - self.description = "can_sign() test" - signer = DSS.new(self.key_priv, 'fips-186-3') - self.assertTrue(signer.can_sign()) - - signer = DSS.new(self.key_pub, 'fips-186-3') - self.assertFalse(signer.can_sign()) - self.assertRaises(TypeError, signer.sign, SHA256.new(b'xyz')) - - try: - signer.sign(SHA256.new(b'xyz')) - except TypeError as e: - msg = str(e) - else: - msg = "" - self.assertTrue("Private key is needed" in msg) - - def test_negative_unknown_modes_encodings(self): - """Verify that unknown modes/encodings are rejected""" - - self.description = "Unknown mode test" - self.assertRaises(ValueError, DSS.new, self.key_priv, 'fips-186-0') - - self.description = "Unknown encoding test" - self.assertRaises(ValueError, DSS.new, self.key_priv, 'fips-186-3', 'xml') - - def test_asn1_encoding(self): - """Verify ASN.1 encoding""" - - self.description = "ASN.1 encoding test" - hash_obj = SHA256.new() - signer = DSS.new(self.key_priv, 'fips-186-3', 'der') - signature = signer.sign(hash_obj) - - # Verify that output looks like a DER SEQUENCE - self.assertEqual(bord(signature[0]), 48) - signer.verify(hash_obj, signature) - - # Verify that ASN.1 parsing fails as expected - signature = bchr(7) + signature[1:] - self.assertRaises(ValueError, signer.verify, hash_obj, signature) - - -class FIPS_ECDSA_Tests_KAT(unittest.TestCase): - pass - - -test_vectors_verify = load_test_vectors(("Signature", "ECDSA"), - "SigVer.rsp", - "ECDSA Signature Verification 186-3", - {'result': lambda x: x, - 'qx': lambda x: int(x, 16), - 'qy': lambda x: int(x, 16), - }) or [] -test_vectors_verify += load_test_vectors(("Signature", "ECDSA"), - "SigVer_TruncatedSHAs.rsp", - "ECDSA Signature Verification 186-3", - {'result': lambda x: x, - 'qx': lambda x: int(x, 16), - 'qy': lambda x: int(x, 16), - }) or [] - - -for idx, tv in enumerate(test_vectors_verify): - - if isinstance(tv, str): - res = re.match(r"\[(P-[0-9]+),(SHA-[0-9]+)\]", tv) - assert res - curve_name = res.group(1) - hash_name = res.group(2).replace("-", "") - if hash_name in ("SHA512224", "SHA512256"): - truncate = hash_name[-3:] - hash_name = hash_name[:-3] - else: - truncate = None - hash_module = load_hash_by_name(hash_name) - continue - - if truncate is None: - hash_obj = hash_module.new(tv.msg) - else: - hash_obj = hash_module.new(tv.msg, truncate=truncate) - ecc_key = ECC.construct(curve=curve_name, point_x=tv.qx, point_y=tv.qy) - verifier = DSS.new(ecc_key, 'fips-186-3') - - def positive_test(self, verifier=verifier, hash_obj=hash_obj, signature=tv.r+tv.s): - verifier.verify(hash_obj, signature) - - def negative_test(self, verifier=verifier, hash_obj=hash_obj, signature=tv.r+tv.s): - self.assertRaises(ValueError, verifier.verify, hash_obj, signature) - - if tv.result.startswith('p'): - setattr(FIPS_ECDSA_Tests_KAT, "test_verify_positive_%d" % idx, positive_test) - else: - setattr(FIPS_ECDSA_Tests_KAT, "test_verify_negative_%d" % idx, negative_test) - - -test_vectors_sign = load_test_vectors(("Signature", "ECDSA"), - "SigGen.txt", - "ECDSA Signature Verification 186-3", - {'d': lambda x: int(x, 16)}) or [] - -for idx, tv in enumerate(test_vectors_sign): - - if 
isinstance(tv, str): - res = re.match(r"\[(P-[0-9]+),(SHA-[0-9]+)\]", tv) - assert res - curve_name = res.group(1) - hash_name = res.group(2).replace("-", "") - hash_module = load_hash_by_name(hash_name) - continue - - hash_obj = hash_module.new(tv.msg) - ecc_key = ECC.construct(curve=curve_name, d=tv.d) - signer = DSS.new(ecc_key, 'fips-186-3', randfunc=StrRNG(tv.k)) - - def sign_test(self, signer=signer, hash_obj=hash_obj, signature=tv.r+tv.s): - self.assertEqual(signer.sign(hash_obj), signature) - setattr(FIPS_ECDSA_Tests_KAT, "test_sign_%d" % idx, sign_test) - - -class Det_DSA_Tests(unittest.TestCase): - """Tests from rfc6979""" - - # Each key is (p, q, g, x, y, desc) - keys = [ - ( - """ - 86F5CA03DCFEB225063FF830A0C769B9DD9D6153AD91D7CE27F787C43278B447 - E6533B86B18BED6E8A48B784A14C252C5BE0DBF60B86D6385BD2F12FB763ED88 - 73ABFD3F5BA2E0A8C0A59082EAC056935E529DAF7C610467899C77ADEDFC846C - 881870B7B19B2B58F9BE0521A17002E3BDD6B86685EE90B3D9A1B02B782B1779""", - "996F967F6C8E388D9E28D01E205FBA957A5698B1", - """ - 07B0F92546150B62514BB771E2A0C0CE387F03BDA6C56B505209FF25FD3C133D - 89BBCD97E904E09114D9A7DEFDEADFC9078EA544D2E401AEECC40BB9FBBF78FD - 87995A10A1C27CB7789B594BA7EFB5C4326A9FE59A070E136DB77175464ADCA4 - 17BE5DCE2F40D10A46A3A3943F26AB7FD9C0398FF8C76EE0A56826A8A88F1DBD""", - "411602CB19A6CCC34494D79D98EF1E7ED5AF25F7", - """ - 5DF5E01DED31D0297E274E1691C192FE5868FEF9E19A84776454B100CF16F653 - 92195A38B90523E2542EE61871C0440CB87C322FC4B4D2EC5E1E7EC766E1BE8D - 4CE935437DC11C3C8FD426338933EBFE739CB3465F4D3668C5E473508253B1E6 - 82F65CBDC4FAE93C2EA212390E54905A86E2223170B44EAA7DA5DD9FFCFB7F3B""", - "DSA1024" - ), - ( - """ - 9DB6FB5951B66BB6FE1E140F1D2CE5502374161FD6538DF1648218642F0B5C48 - C8F7A41AADFA187324B87674FA1822B00F1ECF8136943D7C55757264E5A1A44F - FE012E9936E00C1D3E9310B01C7D179805D3058B2A9F4BB6F9716BFE6117C6B5 - B3CC4D9BE341104AD4A80AD6C94E005F4B993E14F091EB51743BF33050C38DE2 - 35567E1B34C3D6A5C0CEAA1A0F368213C3D19843D0B4B09DCB9FC72D39C8DE41 - F1BF14D4BB4563CA28371621CAD3324B6A2D392145BEBFAC748805236F5CA2FE - 92B871CD8F9C36D3292B5509CA8CAA77A2ADFC7BFD77DDA6F71125A7456FEA15 - 3E433256A2261C6A06ED3693797E7995FAD5AABBCFBE3EDA2741E375404AE25B""", - "F2C3119374CE76C9356990B465374A17F23F9ED35089BD969F61C6DDE9998C1F", - """ - 5C7FF6B06F8F143FE8288433493E4769C4D988ACE5BE25A0E24809670716C613 - D7B0CEE6932F8FAA7C44D2CB24523DA53FBE4F6EC3595892D1AA58C4328A06C4 - 6A15662E7EAA703A1DECF8BBB2D05DBE2EB956C142A338661D10461C0D135472 - 085057F3494309FFA73C611F78B32ADBB5740C361C9F35BE90997DB2014E2EF5 - AA61782F52ABEB8BD6432C4DD097BC5423B285DAFB60DC364E8161F4A2A35ACA - 3A10B1C4D203CC76A470A33AFDCBDD92959859ABD8B56E1725252D78EAC66E71 - BA9AE3F1DD2487199874393CD4D832186800654760E1E34C09E4D155179F9EC0 - DC4473F996BDCE6EED1CABED8B6F116F7AD9CF505DF0F998E34AB27514B0FFE7""", - "69C7548C21D0DFEA6B9A51C9EAD4E27C33D3B3F180316E5BCAB92C933F0E4DBC", - """ - 667098C654426C78D7F8201EAC6C203EF030D43605032C2F1FA937E5237DBD94 - 9F34A0A2564FE126DC8B715C5141802CE0979C8246463C40E6B6BDAA2513FA61 - 1728716C2E4FD53BC95B89E69949D96512E873B9C8F8DFD499CC312882561ADE - CB31F658E934C0C197F2C4D96B05CBAD67381E7B768891E4DA3843D24D94CDFB - 5126E9B8BF21E8358EE0E0A30EF13FD6A664C0DCE3731F7FB49A4845A4FD8254 - 687972A2D382599C9BAC4E0ED7998193078913032558134976410B89D2C171D1 - 23AC35FD977219597AA7D15C1A9A428E59194F75C721EBCBCFAE44696A499AFA - 74E04299F132026601638CB87AB79190D4A0986315DA8EEC6561C938996BEADF""", - "DSA2048" - ), - ] - - # This is a sequence of items: - # message, k, r, s, hash module - signatures = [ - ( - "sample", - 
"7BDB6B0FF756E1BB5D53583EF979082F9AD5BD5B", - "2E1A0C2562B2912CAAF89186FB0F42001585DA55", - "29EFB6B0AFF2D7A68EB70CA313022253B9A88DF5", - SHA1, - 'DSA1024' - ), - ( - "sample", - "562097C06782D60C3037BA7BE104774344687649", - "4BC3B686AEA70145856814A6F1BB53346F02101E", - "410697B92295D994D21EDD2F4ADA85566F6F94C1", - SHA224, - 'DSA1024' - ), - ( - "sample", - "519BA0546D0C39202A7D34D7DFA5E760B318BCFB", - "81F2F5850BE5BC123C43F71A3033E9384611C545", - "4CDD914B65EB6C66A8AAAD27299BEE6B035F5E89", - SHA256, - 'DSA1024' - ), - ( - "sample", - "95897CD7BBB944AA932DBC579C1C09EB6FCFC595", - "07F2108557EE0E3921BC1774F1CA9B410B4CE65A", - "54DF70456C86FAC10FAB47C1949AB83F2C6F7595", - SHA384, - 'DSA1024' - ), - ( - "sample", - "09ECE7CA27D0F5A4DD4E556C9DF1D21D28104F8B", - "16C3491F9B8C3FBBDD5E7A7B667057F0D8EE8E1B", - "02C36A127A7B89EDBB72E4FFBC71DABC7D4FC69C", - SHA512, - 'DSA1024' - ), - ( - "test", - "5C842DF4F9E344EE09F056838B42C7A17F4A6433", - "42AB2052FD43E123F0607F115052A67DCD9C5C77", - "183916B0230D45B9931491D4C6B0BD2FB4AAF088", - SHA1, - 'DSA1024' - ), - ( - "test", - "4598B8EFC1A53BC8AECD58D1ABBB0C0C71E67297", - "6868E9964E36C1689F6037F91F28D5F2C30610F2", - "49CEC3ACDC83018C5BD2674ECAAD35B8CD22940F", - SHA224, - 'DSA1024' - ), - ( - "test", - "5A67592E8128E03A417B0484410FB72C0B630E1A", - "22518C127299B0F6FDC9872B282B9E70D0790812", - "6837EC18F150D55DE95B5E29BE7AF5D01E4FE160", - SHA256, - 'DSA1024' - ), - ( - "test", - "220156B761F6CA5E6C9F1B9CF9C24BE25F98CD89", - "854CF929B58D73C3CBFDC421E8D5430CD6DB5E66", - "91D0E0F53E22F898D158380676A871A157CDA622", - SHA384, - 'DSA1024' - ), - ( - "test", - "65D2C2EEB175E370F28C75BFCDC028D22C7DBE9C", - "8EA47E475BA8AC6F2D821DA3BD212D11A3DEB9A0", - "7C670C7AD72B6C050C109E1790008097125433E8", - SHA512, - 'DSA1024' - ), - ( - "sample", - "888FA6F7738A41BDC9846466ABDB8174C0338250AE50CE955CA16230F9CBD53E", - "3A1B2DBD7489D6ED7E608FD036C83AF396E290DBD602408E8677DAABD6E7445A", - "D26FCBA19FA3E3058FFC02CA1596CDBB6E0D20CB37B06054F7E36DED0CDBBCCF", - SHA1, - 'DSA2048' - ), - ( - "sample", - "BC372967702082E1AA4FCE892209F71AE4AD25A6DFD869334E6F153BD0C4D806", - "DC9F4DEADA8D8FF588E98FED0AB690FFCE858DC8C79376450EB6B76C24537E2C", - "A65A9C3BC7BABE286B195D5DA68616DA8D47FA0097F36DD19F517327DC848CEC", - SHA224, - 'DSA2048' - ), - ( - "sample", - "8926A27C40484216F052F4427CFD5647338B7B3939BC6573AF4333569D597C52", - "EACE8BDBBE353C432A795D9EC556C6D021F7A03F42C36E9BC87E4AC7932CC809", - "7081E175455F9247B812B74583E9E94F9EA79BD640DC962533B0680793A38D53", - SHA256, - 'DSA2048' - ), - ( - "sample", - "C345D5AB3DA0A5BCB7EC8F8FB7A7E96069E03B206371EF7D83E39068EC564920", - "B2DA945E91858834FD9BF616EBAC151EDBC4B45D27D0DD4A7F6A22739F45C00B", - "19048B63D9FD6BCA1D9BAE3664E1BCB97F7276C306130969F63F38FA8319021B", - SHA384, - 'DSA2048' - ), - ( - "sample", - "5A12994431785485B3F5F067221517791B85A597B7A9436995C89ED0374668FC", - "2016ED092DC5FB669B8EFB3D1F31A91EECB199879BE0CF78F02BA062CB4C942E", - "D0C76F84B5F091E141572A639A4FB8C230807EEA7D55C8A154A224400AFF2351", - SHA512, - 'DSA2048' - ), - ( - "test", - "6EEA486F9D41A037B2C640BC5645694FF8FF4B98D066A25F76BE641CCB24BA4F", - "C18270A93CFC6063F57A4DFA86024F700D980E4CF4E2CB65A504397273D98EA0", - "414F22E5F31A8B6D33295C7539C1C1BA3A6160D7D68D50AC0D3A5BEAC2884FAA", - SHA1, - 'DSA2048' - ), - ( - "test", - "06BD4C05ED74719106223BE33F2D95DA6B3B541DAD7BFBD7AC508213B6DA6670", - "272ABA31572F6CC55E30BF616B7A265312018DD325BE031BE0CC82AA17870EA3", - "E9CC286A52CCE201586722D36D1E917EB96A4EBDB47932F9576AC645B3A60806", - SHA224, - 'DSA2048' - ), - ( - "test", 
- "1D6CE6DDA1C5D37307839CD03AB0A5CBB18E60D800937D67DFB4479AAC8DEAD7", - "8190012A1969F9957D56FCCAAD223186F423398D58EF5B3CEFD5A4146A4476F0", - "7452A53F7075D417B4B013B278D1BB8BBD21863F5E7B1CEE679CF2188E1AB19E", - SHA256, - 'DSA2048' - ), - ( - "test", - "206E61F73DBE1B2DC8BE736B22B079E9DACD974DB00EEBBC5B64CAD39CF9F91C", - "239E66DDBE8F8C230A3D071D601B6FFBDFB5901F94D444C6AF56F732BEB954BE", - "6BD737513D5E72FE85D1C750E0F73921FE299B945AAD1C802F15C26A43D34961", - SHA384, - 'DSA2048' - ), - ( - "test", - "AFF1651E4CD6036D57AA8B2A05CCF1A9D5A40166340ECBBDC55BE10B568AA0AA", - "89EC4BB1400ECCFF8E7D9AA515CD1DE7803F2DAFF09693EE7FD1353E90A68307", - "C9F0BDABCC0D880BB137A994CC7F3980CE91CC10FAF529FC46565B15CEA854E1", - SHA512, - 'DSA2048' - ) - ] - - def setUp(self): - # Convert DSA key components from hex strings to integers - # Each key is (p, q, g, x, y, desc) - - from collections import namedtuple - - TestKey = namedtuple('TestKey', 'p q g x y') - new_keys = {} - for k in self.keys: - tk = TestKey(*[t2l(y) for y in k[:-1]]) - new_keys[k[-1]] = tk - self.keys = new_keys - - # Convert signature encoding - TestSig = namedtuple('TestSig', 'message nonce result module test_key') - new_signatures = [] - for message, nonce, r, s, module, test_key in self.signatures: - tsig = TestSig( - tobytes(message), - t2l(nonce), - t2b(r) + t2b(s), - module, - self.keys[test_key] - ) - new_signatures.append(tsig) - self.signatures = new_signatures - - def test1(self): - q = 0x4000000000000000000020108A2E0CC0D99F8A5EF - x = 0x09A4D6792295A7F730FC3F2B49CBC0F62E862272F - p = 2 * q + 1 - y = pow(2, x, p) - key = DSA.construct([pow(y, 2, p), 2, p, q, x], False) - signer = DSS.new(key, 'deterministic-rfc6979') - - # Test _int2octets - self.assertEqual(hexlify(signer._int2octets(x)), - b'009a4d6792295a7f730fc3f2b49cbc0f62e862272f') - - # Test _bits2octets - h1 = SHA256.new(b"sample").digest() - self.assertEqual(hexlify(signer._bits2octets(h1)), - b'01795edf0d54db760f156d0dac04c0322b3a204224') - - def test2(self): - - for sig in self.signatures: - tk = sig.test_key - key = DSA.construct([tk.y, tk.g, tk.p, tk.q, tk.x], False) - signer = DSS.new(key, 'deterministic-rfc6979') - - hash_obj = sig.module.new(sig.message) - result = signer.sign(hash_obj) - self.assertEqual(sig.result, result) - - -class Det_ECDSA_Tests(unittest.TestCase): - - key_priv_p192 = ECC.construct(curve="P-192", d=0x6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4) - key_pub_p192 = key_priv_p192.public_key() - - key_priv_p224 = ECC.construct(curve="P-224", d=0xF220266E1105BFE3083E03EC7A3A654651F45E37167E88600BF257C1) - key_pub_p224 = key_priv_p224.public_key() - - key_priv_p256 = ECC.construct(curve="P-256", d=0xC9AFA9D845BA75166B5C215767B1D6934E50C3DB36E89B127B8A622B120F6721) - key_pub_p256 = key_priv_p256.public_key() - - key_priv_p384 = ECC.construct(curve="P-384", d=0x6B9D3DAD2E1B8C1C05B19875B6659F4DE23C3B667BF297BA9AA47740787137D896D5724E4C70A825F872C9EA60D2EDF5) - key_pub_p384 = key_priv_p384.public_key() - - key_priv_p521 = ECC.construct(curve="P-521", d=0x0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538) - key_pub_p521 = key_priv_p521.public_key() - - # This is a sequence of items: - # message, k, r, s, hash module - # taken from RFC6979 - signatures_p192_ = ( - ( - "sample", - "37D7CA00D2C7B0E5E412AC03BD44BA837FDD5B28CD3B0021", - "98C6BD12B23EAF5E2A2045132086BE3EB8EBD62ABF6698FF", - "57A22B07DEA9530F8DE9471B1DC6624472E8E2844BC25B64", - SHA1 - ), - ( - "sample", - 
"4381526B3FC1E7128F202E194505592F01D5FF4C5AF015D8", - "A1F00DAD97AEEC91C95585F36200C65F3C01812AA60378F5", - "E07EC1304C7C6C9DEBBE980B9692668F81D4DE7922A0F97A", - SHA224 - ), - ( - "sample", - "32B1B6D7D42A05CB449065727A84804FB1A3E34D8F261496", - "4B0B8CE98A92866A2820E20AA6B75B56382E0F9BFD5ECB55", - "CCDB006926EA9565CBADC840829D8C384E06DE1F1E381B85", - SHA256 - ), - ( - "sample", - "4730005C4FCB01834C063A7B6760096DBE284B8252EF4311", - "DA63BF0B9ABCF948FBB1E9167F136145F7A20426DCC287D5", - "C3AA2C960972BD7A2003A57E1C4C77F0578F8AE95E31EC5E", - SHA384 - ), - ( - "sample", - "A2AC7AB055E4F20692D49209544C203A7D1F2C0BFBC75DB1", - "4D60C5AB1996BD848343B31C00850205E2EA6922DAC2E4B8", - "3F6E837448F027A1BF4B34E796E32A811CBB4050908D8F67", - SHA512 - ), - ( - "test", - "D9CF9C3D3297D3260773A1DA7418DB5537AB8DD93DE7FA25", - "0F2141A0EBBC44D2E1AF90A50EBCFCE5E197B3B7D4DE036D", - "EB18BC9E1F3D7387500CB99CF5F7C157070A8961E38700B7", - SHA1 - ), - ( - "test", - "F5DC805F76EF851800700CCE82E7B98D8911B7D510059FBE", - "6945A1C1D1B2206B8145548F633BB61CEF04891BAF26ED34", - "B7FB7FDFC339C0B9BD61A9F5A8EAF9BE58FC5CBA2CB15293", - SHA224 - ), - ( - "test", - "5C4CE89CF56D9E7C77C8585339B006B97B5F0680B4306C6C", - "3A718BD8B4926C3B52EE6BBE67EF79B18CB6EB62B1AD97AE", - "5662E6848A4A19B1F1AE2F72ACD4B8BBE50F1EAC65D9124F", - SHA256 - ), - ( - "test", - "5AFEFB5D3393261B828DB6C91FBC68C230727B030C975693", - "B234B60B4DB75A733E19280A7A6034BD6B1EE88AF5332367", - "7994090B2D59BB782BE57E74A44C9A1C700413F8ABEFE77A", - SHA384 - ), - ( - "test", - "0758753A5254759C7CFBAD2E2D9B0792EEE44136C9480527", - "FE4F4AE86A58B6507946715934FE2D8FF9D95B6B098FE739", - "74CF5605C98FBA0E1EF34D4B5A1577A7DCF59457CAE52290", - SHA512 - ) - ) - - signatures_p224_ = ( - ( - "sample", - "7EEFADD91110D8DE6C2C470831387C50D3357F7F4D477054B8B426BC", - "22226F9D40A96E19C4A301CE5B74B115303C0F3A4FD30FC257FB57AC", - "66D1CDD83E3AF75605DD6E2FEFF196D30AA7ED7A2EDF7AF475403D69", - SHA1 - ), - ( - "sample", - "C1D1F2F10881088301880506805FEB4825FE09ACB6816C36991AA06D", - "1CDFE6662DDE1E4A1EC4CDEDF6A1F5A2FB7FBD9145C12113E6ABFD3E", - "A6694FD7718A21053F225D3F46197CA699D45006C06F871808F43EBC", - SHA224 - ), - ( - "sample", - "AD3029E0278F80643DE33917CE6908C70A8FF50A411F06E41DEDFCDC", - "61AA3DA010E8E8406C656BC477A7A7189895E7E840CDFE8FF42307BA", - "BC814050DAB5D23770879494F9E0A680DC1AF7161991BDE692B10101", - SHA256 - ), - ( - "sample", - "52B40F5A9D3D13040F494E83D3906C6079F29981035C7BD51E5CAC40", - "0B115E5E36F0F9EC81F1325A5952878D745E19D7BB3EABFABA77E953", - "830F34CCDFE826CCFDC81EB4129772E20E122348A2BBD889A1B1AF1D", - SHA384 - ), - ( - "sample", - "9DB103FFEDEDF9CFDBA05184F925400C1653B8501BAB89CEA0FBEC14", - "074BD1D979D5F32BF958DDC61E4FB4872ADCAFEB2256497CDAC30397", - "A4CECA196C3D5A1FF31027B33185DC8EE43F288B21AB342E5D8EB084", - SHA512 - ), - ( - "test", - "2519178F82C3F0E4F87ED5883A4E114E5B7A6E374043D8EFD329C253", - "DEAA646EC2AF2EA8AD53ED66B2E2DDAA49A12EFD8356561451F3E21C", - "95987796F6CF2062AB8135271DE56AE55366C045F6D9593F53787BD2", - SHA1 - ), - ( - "test", - "DF8B38D40DCA3E077D0AC520BF56B6D565134D9B5F2EAE0D34900524", - "C441CE8E261DED634E4CF84910E4C5D1D22C5CF3B732BB204DBEF019", - "902F42847A63BDC5F6046ADA114953120F99442D76510150F372A3F4", - SHA224 - ), - ( - "test", - "FF86F57924DA248D6E44E8154EB69F0AE2AEBAEE9931D0B5A969F904", - "AD04DDE87B84747A243A631EA47A1BA6D1FAA059149AD2440DE6FBA6", - "178D49B1AE90E3D8B629BE3DB5683915F4E8C99FDF6E666CF37ADCFD", - SHA256 - ), - ( - "test", - "7046742B839478C1B5BD31DB2E862AD868E1A45C863585B5F22BDC2D", - 
"389B92682E399B26518A95506B52C03BC9379A9DADF3391A21FB0EA4", - "414A718ED3249FF6DBC5B50C27F71F01F070944DA22AB1F78F559AAB", - SHA384 - ), - ( - "test", - "E39C2AA4EA6BE2306C72126D40ED77BF9739BB4D6EF2BBB1DCB6169D", - "049F050477C5ADD858CAC56208394B5A55BAEBBE887FDF765047C17C", - "077EB13E7005929CEFA3CD0403C7CDCC077ADF4E44F3C41B2F60ECFF", - SHA512 - ) - ) - - signatures_p256_ = ( - ( - "sample", - "882905F1227FD620FBF2ABF21244F0BA83D0DC3A9103DBBEE43A1FB858109DB4", - "61340C88C3AAEBEB4F6D667F672CA9759A6CCAA9FA8811313039EE4A35471D32", - "6D7F147DAC089441BB2E2FE8F7A3FA264B9C475098FDCF6E00D7C996E1B8B7EB", - SHA1 - ), - ( - "sample", - "103F90EE9DC52E5E7FB5132B7033C63066D194321491862059967C715985D473", - "53B2FFF5D1752B2C689DF257C04C40A587FABABB3F6FC2702F1343AF7CA9AA3F", - "B9AFB64FDC03DC1A131C7D2386D11E349F070AA432A4ACC918BEA988BF75C74C", - SHA224 - ), - ( - "sample", - "A6E3C57DD01ABE90086538398355DD4C3B17AA873382B0F24D6129493D8AAD60", - "EFD48B2AACB6A8FD1140DD9CD45E81D69D2C877B56AAF991C34D0EA84EAF3716", - "F7CB1C942D657C41D436C7A1B6E29F65F3E900DBB9AFF4064DC4AB2F843ACDA8", - SHA256 - ), - ( - "sample", - "09F634B188CEFD98E7EC88B1AA9852D734D0BC272F7D2A47DECC6EBEB375AAD4", - "0EAFEA039B20E9B42309FB1D89E213057CBF973DC0CFC8F129EDDDC800EF7719", - "4861F0491E6998B9455193E34E7B0D284DDD7149A74B95B9261F13ABDE940954", - SHA384 - ), - ( - "sample", - "5FA81C63109BADB88C1F367B47DA606DA28CAD69AA22C4FE6AD7DF73A7173AA5", - "8496A60B5E9B47C825488827E0495B0E3FA109EC4568FD3F8D1097678EB97F00", - "2362AB1ADBE2B8ADF9CB9EDAB740EA6049C028114F2460F96554F61FAE3302FE", - SHA512 - ), - ( - "test", - "8C9520267C55D6B980DF741E56B4ADEE114D84FBFA2E62137954164028632A2E", - "0CBCC86FD6ABD1D99E703E1EC50069EE5C0B4BA4B9AC60E409E8EC5910D81A89", - "01B9D7B73DFAA60D5651EC4591A0136F87653E0FD780C3B1BC872FFDEAE479B1", - SHA1 - ), - ( - "test", - "669F4426F2688B8BE0DB3A6BD1989BDAEFFF84B649EEB84F3DD26080F667FAA7", - "C37EDB6F0AE79D47C3C27E962FA269BB4F441770357E114EE511F662EC34A692", - "C820053A05791E521FCAAD6042D40AEA1D6B1A540138558F47D0719800E18F2D", - SHA224 - ), - ( - "test", - "D16B6AE827F17175E040871A1C7EC3500192C4C92677336EC2537ACAEE0008E0", - "F1ABB023518351CD71D881567B1EA663ED3EFCF6C5132B354F28D3B0B7D38367", - "019F4113742A2B14BD25926B49C649155F267E60D3814B4C0CC84250E46F0083", - SHA256 - ), - ( - "test", - "16AEFFA357260B04B1DD199693960740066C1A8F3E8EDD79070AA914D361B3B8", - "83910E8B48BB0C74244EBDF7F07A1C5413D61472BD941EF3920E623FBCCEBEB6", - "8DDBEC54CF8CD5874883841D712142A56A8D0F218F5003CB0296B6B509619F2C", - SHA384 - ), - ( - "test", - "6915D11632ACA3C40D5D51C08DAF9C555933819548784480E93499000D9F0B7F", - "461D93F31B6540894788FD206C07CFA0CC35F46FA3C91816FFF1040AD1581A04", - "39AF9F15DE0DB8D97E72719C74820D304CE5226E32DEDAE67519E840D1194E55", - SHA512 - ) - ) - - signatures_p384_ = ( - ( - "sample", - "4471EF7518BB2C7C20F62EAE1C387AD0C5E8E470995DB4ACF694466E6AB096630F29E5938D25106C3C340045A2DB01A7", - "EC748D839243D6FBEF4FC5C4859A7DFFD7F3ABDDF72014540C16D73309834FA37B9BA002899F6FDA3A4A9386790D4EB2", - "A3BCFA947BEEF4732BF247AC17F71676CB31A847B9FF0CBC9C9ED4C1A5B3FACF26F49CA031D4857570CCB5CA4424A443", - SHA1 - ), - ( - "sample", - "A4E4D2F0E729EB786B31FC20AD5D849E304450E0AE8E3E341134A5C1AFA03CAB8083EE4E3C45B06A5899EA56C51B5879", - "42356E76B55A6D9B4631C865445DBE54E056D3B3431766D0509244793C3F9366450F76EE3DE43F5A125333A6BE060122", - "9DA0C81787064021E78DF658F2FBB0B042BF304665DB721F077A4298B095E4834C082C03D83028EFBF93A3C23940CA8D", - SHA224 - ), - ( - "sample", - 
"180AE9F9AEC5438A44BC159A1FCB277C7BE54FA20E7CF404B490650A8ACC414E375572342863C899F9F2EDF9747A9B60", - "21B13D1E013C7FA1392D03C5F99AF8B30C570C6F98D4EA8E354B63A21D3DAA33BDE1E888E63355D92FA2B3C36D8FB2CD", - "F3AA443FB107745BF4BD77CB3891674632068A10CA67E3D45DB2266FA7D1FEEBEFDC63ECCD1AC42EC0CB8668A4FA0AB0", - SHA256 - ), - ( - "sample", - "94ED910D1A099DAD3254E9242AE85ABDE4BA15168EAF0CA87A555FD56D10FBCA2907E3E83BA95368623B8C4686915CF9", - "94EDBB92A5ECB8AAD4736E56C691916B3F88140666CE9FA73D64C4EA95AD133C81A648152E44ACF96E36DD1E80FABE46", - "99EF4AEB15F178CEA1FE40DB2603138F130E740A19624526203B6351D0A3A94FA329C145786E679E7B82C71A38628AC8", - SHA384 - ), - ( - "sample", - "92FC3C7183A883E24216D1141F1A8976C5B0DD797DFA597E3D7B32198BD35331A4E966532593A52980D0E3AAA5E10EC3", - "ED0959D5880AB2D869AE7F6C2915C6D60F96507F9CB3E047C0046861DA4A799CFE30F35CC900056D7C99CD7882433709", - "512C8CCEEE3890A84058CE1E22DBC2198F42323CE8ACA9135329F03C068E5112DC7CC3EF3446DEFCEB01A45C2667FDD5", - SHA512 - ), - ( - "test", - "66CC2C8F4D303FC962E5FF6A27BD79F84EC812DDAE58CF5243B64A4AD8094D47EC3727F3A3C186C15054492E30698497", - "4BC35D3A50EF4E30576F58CD96CE6BF638025EE624004A1F7789A8B8E43D0678ACD9D29876DAF46638645F7F404B11C7", - "D5A6326C494ED3FF614703878961C0FDE7B2C278F9A65FD8C4B7186201A2991695BA1C84541327E966FA7B50F7382282", - SHA1 - ), - ( - "test", - "18FA39DB95AA5F561F30FA3591DC59C0FA3653A80DAFFA0B48D1A4C6DFCBFF6E3D33BE4DC5EB8886A8ECD093F2935726", - "E8C9D0B6EA72A0E7837FEA1D14A1A9557F29FAA45D3E7EE888FC5BF954B5E62464A9A817C47FF78B8C11066B24080E72", - "07041D4A7A0379AC7232FF72E6F77B6DDB8F09B16CCE0EC3286B2BD43FA8C6141C53EA5ABEF0D8231077A04540A96B66", - SHA224 - ), - ( - "test", - "0CFAC37587532347DC3389FDC98286BBA8C73807285B184C83E62E26C401C0FAA48DD070BA79921A3457ABFF2D630AD7", - "6D6DEFAC9AB64DABAFE36C6BF510352A4CC27001263638E5B16D9BB51D451559F918EEDAF2293BE5B475CC8F0188636B", - "2D46F3BECBCC523D5F1A1256BF0C9B024D879BA9E838144C8BA6BAEB4B53B47D51AB373F9845C0514EEFB14024787265", - SHA256 - ), - ( - "test", - "015EE46A5BF88773ED9123A5AB0807962D193719503C527B031B4C2D225092ADA71F4A459BC0DA98ADB95837DB8312EA", - "8203B63D3C853E8D77227FB377BCF7B7B772E97892A80F36AB775D509D7A5FEB0542A7F0812998DA8F1DD3CA3CF023DB", - "DDD0760448D42D8A43AF45AF836FCE4DE8BE06B485E9B61B827C2F13173923E06A739F040649A667BF3B828246BAA5A5", - SHA384 - ), - ( - "test", - "3780C4F67CB15518B6ACAE34C9F83568D2E12E47DEAB6C50A4E4EE5319D1E8CE0E2CC8A136036DC4B9C00E6888F66B6C", - "A0D5D090C9980FAF3C2CE57B7AE951D31977DD11C775D314AF55F76C676447D06FB6495CD21B4B6E340FC236584FB277", - "976984E59B4C77B0E8E4460DCA3D9F20E07B9BB1F63BEEFAF576F6B2E8B224634A2092CD3792E0159AD9CEE37659C736", - SHA512 - ), - ) - - signatures_p521_ = ( - ( - "sample", - "0089C071B419E1C2820962321787258469511958E80582E95D8378E0C2CCDB3CB42BEDE42F50E3FA3C71F5A76724281D31D9C89F0F91FC1BE4918DB1C03A5838D0F9", - "00343B6EC45728975EA5CBA6659BBB6062A5FF89EEA58BE3C80B619F322C87910FE092F7D45BB0F8EEE01ED3F20BABEC079D202AE677B243AB40B5431D497C55D75D", - "00E7B0E675A9B24413D448B8CC119D2BF7B2D2DF032741C096634D6D65D0DBE3D5694625FB9E8104D3B842C1B0E2D0B98BEA19341E8676AEF66AE4EBA3D5475D5D16", - SHA1 - ), - ( - "sample", - "0121415EC2CD7726330A61F7F3FA5DE14BE9436019C4DB8CB4041F3B54CF31BE0493EE3F427FB906393D895A19C9523F3A1D54BB8702BD4AA9C99DAB2597B92113F3", - "01776331CFCDF927D666E032E00CF776187BC9FDD8E69D0DABB4109FFE1B5E2A30715F4CC923A4A5E94D2503E9ACFED92857B7F31D7152E0F8C00C15FF3D87E2ED2E", - 
"0050CB5265417FE2320BBB5A122B8E1A32BD699089851128E360E620A30C7E17BA41A666AF126CE100E5799B153B60528D5300D08489CA9178FB610A2006C254B41F", - SHA224 - ), - ( - "sample", - "00EDF38AFCAAECAB4383358B34D67C9F2216C8382AAEA44A3DAD5FDC9C32575761793FEF24EB0FC276DFC4F6E3EC476752F043CF01415387470BCBD8678ED2C7E1A0", - "01511BB4D675114FE266FC4372B87682BAECC01D3CC62CF2303C92B3526012659D16876E25C7C1E57648F23B73564D67F61C6F14D527D54972810421E7D87589E1A7", - "004A171143A83163D6DF460AAF61522695F207A58B95C0644D87E52AA1A347916E4F7A72930B1BC06DBE22CE3F58264AFD23704CBB63B29B931F7DE6C9D949A7ECFC", - SHA256 - ), - ( - "sample", - "01546A108BC23A15D6F21872F7DED661FA8431DDBD922D0DCDB77CC878C8553FFAD064C95A920A750AC9137E527390D2D92F153E66196966EA554D9ADFCB109C4211", - "01EA842A0E17D2DE4F92C15315C63DDF72685C18195C2BB95E572B9C5136CA4B4B576AD712A52BE9730627D16054BA40CC0B8D3FF035B12AE75168397F5D50C67451", - "01F21A3CEE066E1961025FB048BD5FE2B7924D0CD797BABE0A83B66F1E35EEAF5FDE143FA85DC394A7DEE766523393784484BDF3E00114A1C857CDE1AA203DB65D61", - SHA384 - ), - ( - "sample", - "01DAE2EA071F8110DC26882D4D5EAE0621A3256FC8847FB9022E2B7D28E6F10198B1574FDD03A9053C08A1854A168AA5A57470EC97DD5CE090124EF52A2F7ECBFFD3", - "00C328FAFCBD79DD77850370C46325D987CB525569FB63C5D3BC53950E6D4C5F174E25A1EE9017B5D450606ADD152B534931D7D4E8455CC91F9B15BF05EC36E377FA", - "00617CCE7CF5064806C467F678D3B4080D6F1CC50AF26CA209417308281B68AF282623EAA63E5B5C0723D8B8C37FF0777B1A20F8CCB1DCCC43997F1EE0E44DA4A67A", - SHA512 - ), - ( - "test", - "00BB9F2BF4FE1038CCF4DABD7139A56F6FD8BB1386561BD3C6A4FC818B20DF5DDBA80795A947107A1AB9D12DAA615B1ADE4F7A9DC05E8E6311150F47F5C57CE8B222", - "013BAD9F29ABE20DE37EBEB823C252CA0F63361284015A3BF430A46AAA80B87B0693F0694BD88AFE4E661FC33B094CD3B7963BED5A727ED8BD6A3A202ABE009D0367", - "01E9BB81FF7944CA409AD138DBBEE228E1AFCC0C890FC78EC8604639CB0DBDC90F717A99EAD9D272855D00162EE9527567DD6A92CBD629805C0445282BBC916797FF", - SHA1 - ), - ( - "test", - "0040D09FCF3C8A5F62CF4FB223CBBB2B9937F6B0577C27020A99602C25A01136987E452988781484EDBBCF1C47E554E7FC901BC3085E5206D9F619CFF07E73D6F706", - "01C7ED902E123E6815546065A2C4AF977B22AA8EADDB68B2C1110E7EA44D42086BFE4A34B67DDC0E17E96536E358219B23A706C6A6E16BA77B65E1C595D43CAE17FB", - "0177336676304FCB343CE028B38E7B4FBA76C1C1B277DA18CAD2A8478B2A9A9F5BEC0F3BA04F35DB3E4263569EC6AADE8C92746E4C82F8299AE1B8F1739F8FD519A4", - SHA224 - ), - ( - "test", - "001DE74955EFAABC4C4F17F8E84D881D1310B5392D7700275F82F145C61E843841AF09035BF7A6210F5A431A6A9E81C9323354A9E69135D44EBD2FCAA7731B909258", - "000E871C4A14F993C6C7369501900C4BC1E9C7B0B4BA44E04868B30B41D8071042EB28C4C250411D0CE08CD197E4188EA4876F279F90B3D8D74A3C76E6F1E4656AA8", - "00CD52DBAA33B063C3A6CD8058A1FB0A46A4754B034FCC644766CA14DA8CA5CA9FDE00E88C1AD60CCBA759025299079D7A427EC3CC5B619BFBC828E7769BCD694E86", - SHA256 - ), - ( - "test", - "01F1FC4A349A7DA9A9E116BFDD055DC08E78252FF8E23AC276AC88B1770AE0B5DCEB1ED14A4916B769A523CE1E90BA22846AF11DF8B300C38818F713DADD85DE0C88", - "014BEE21A18B6D8B3C93FAB08D43E739707953244FDBE924FA926D76669E7AC8C89DF62ED8975C2D8397A65A49DCC09F6B0AC62272741924D479354D74FF6075578C", - "0133330865C067A0EAF72362A65E2D7BC4E461E8C8995C3B6226A21BD1AA78F0ED94FE536A0DCA35534F0CD1510C41525D163FE9D74D134881E35141ED5E8E95B979", - SHA384 - ), - ( - "test", - "016200813020EC986863BEDFC1B121F605C1215645018AEA1A7B215A564DE9EB1B38A67AA1128B80CE391C4FB71187654AAA3431027BFC7F395766CA988C964DC56D", - "013E99020ABF5CEE7525D16B69B229652AB6BDF2AFFCAEF38773B4B7D08725F10CDB93482FDCC54EDCEE91ECA4166B2A7C6265EF0CE2BD7051B7CEF945BABD47EE6D", - 
"01FBD0013C674AA79CB39849527916CE301C66EA7CE8B80682786AD60F98F7E78A19CA69EFF5C57400E3B3A0AD66CE0978214D13BAF4E9AC60752F7B155E2DE4DCE3", - SHA512 - ), - ) - - signatures_p192 = [] - for a, b, c, d, e in signatures_p192_: - new_tv = (tobytes(a), unhexlify(b), unhexlify(c), unhexlify(d), e) - signatures_p192.append(new_tv) - - signatures_p224 = [] - for a, b, c, d, e in signatures_p224_: - new_tv = (tobytes(a), unhexlify(b), unhexlify(c), unhexlify(d), e) - signatures_p224.append(new_tv) - - signatures_p256 = [] - for a, b, c, d, e in signatures_p256_: - new_tv = (tobytes(a), unhexlify(b), unhexlify(c), unhexlify(d), e) - signatures_p256.append(new_tv) - - signatures_p384 = [] - for a, b, c, d, e in signatures_p384_: - new_tv = (tobytes(a), unhexlify(b), unhexlify(c), unhexlify(d), e) - signatures_p384.append(new_tv) - - signatures_p521 = [] - for a, b, c, d, e in signatures_p521_: - new_tv = (tobytes(a), unhexlify(b), unhexlify(c), unhexlify(d), e) - signatures_p521.append(new_tv) - - def shortDescription(self): - return "Deterministic ECDSA Tests" - - def test_loopback_p192(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv_p192, 'deterministic-rfc6979') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub_p192, 'deterministic-rfc6979') - verifier.verify(hashed_msg, signature) - - def test_loopback_p224(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv_p224, 'deterministic-rfc6979') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub_p224, 'deterministic-rfc6979') - verifier.verify(hashed_msg, signature) - - def test_loopback_p256(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv_p256, 'deterministic-rfc6979') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub_p256, 'deterministic-rfc6979') - verifier.verify(hashed_msg, signature) - - def test_loopback_p384(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv_p384, 'deterministic-rfc6979') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub_p384, 'deterministic-rfc6979') - verifier.verify(hashed_msg, signature) - - def test_loopback_p521(self): - hashed_msg = SHA512.new(b"test") - signer = DSS.new(self.key_priv_p521, 'deterministic-rfc6979') - signature = signer.sign(hashed_msg) - - verifier = DSS.new(self.key_pub_p521, 'deterministic-rfc6979') - verifier.verify(hashed_msg, signature) - - def test_data_rfc6979_p192(self): - signer = DSS.new(self.key_priv_p192, 'deterministic-rfc6979') - for message, k, r, s, module in self.signatures_p192: - hash_obj = module.new(message) - result = signer.sign(hash_obj) - self.assertEqual(r + s, result) - - def test_data_rfc6979_p224(self): - signer = DSS.new(self.key_priv_p224, 'deterministic-rfc6979') - for message, k, r, s, module in self.signatures_p224: - hash_obj = module.new(message) - result = signer.sign(hash_obj) - self.assertEqual(r + s, result) - - def test_data_rfc6979_p256(self): - signer = DSS.new(self.key_priv_p256, 'deterministic-rfc6979') - for message, k, r, s, module in self.signatures_p256: - hash_obj = module.new(message) - result = signer.sign(hash_obj) - self.assertEqual(r + s, result) - - def test_data_rfc6979_p384(self): - signer = DSS.new(self.key_priv_p384, 'deterministic-rfc6979') - for message, k, r, s, module in self.signatures_p384: - hash_obj = module.new(message) - result = signer.sign(hash_obj) - self.assertEqual(r + s, result) - - def test_data_rfc6979_p521(self): - signer = 
DSS.new(self.key_priv_p521, 'deterministic-rfc6979') - for message, k, r, s, module in self.signatures_p521: - hash_obj = module.new(message) - result = signer.sign(hash_obj) - self.assertEqual(r + s, result) - - -def get_hash_module(hash_name): - if hash_name == "SHA-512": - hash_module = SHA512 - elif hash_name == "SHA-512/224": - hash_module = SHA512.new(truncate="224") - elif hash_name == "SHA-512/256": - hash_module = SHA512.new(truncate="256") - elif hash_name == "SHA-384": - hash_module = SHA384 - elif hash_name == "SHA-256": - hash_module = SHA256 - elif hash_name == "SHA-224": - hash_module = SHA224 - elif hash_name == "SHA-1": - hash_module = SHA1 - elif hash_name == "SHA3-224": - hash_module = SHA3_224 - elif hash_name == "SHA3-256": - hash_module = SHA3_256 - elif hash_name == "SHA3-384": - hash_module = SHA3_384 - elif hash_name == "SHA3-512": - hash_module = SHA3_512 - else: - raise ValueError("Unknown hash algorithm: " + hash_name) - return hash_module - - -class TestVectorsDSAWycheproof(unittest.TestCase): - - def __init__(self, wycheproof_warnings, slow_tests): - unittest.TestCase.__init__(self) - self._wycheproof_warnings = wycheproof_warnings - self._slow_tests = slow_tests - self._id = "None" - self.tv = [] - - def setUp(self): - - def filter_dsa(group): - return DSA.import_key(group['keyPem']) - - def filter_sha(group): - return get_hash_module(group['sha']) - - def filter_type(group): - sig_type = group['type'] - if sig_type != 'DsaVerify': - raise ValueError("Unknown signature type " + sig_type) - return sig_type - - result = load_test_vectors_wycheproof(("Signature", "wycheproof"), - "dsa_test.json", - "Wycheproof DSA signature", - group_tag={'key': filter_dsa, - 'hash_module': filter_sha, - 'sig_type': filter_type}) - self.tv += result - - def shortDescription(self): - return self._id - - def warn(self, tv): - if tv.warning and self._wycheproof_warnings: - import warnings - warnings.warn("Wycheproof warning: %s (%s)" % (self._id, tv.comment)) - - def test_verify(self, tv): - self._id = "Wycheproof DSA Test #" + str(tv.id) - - hashed_msg = tv.hash_module.new(tv.msg) - signer = DSS.new(tv.key, 'fips-186-3', encoding='der') - try: - signature = signer.verify(hashed_msg, tv.sig) - except ValueError as e: - if tv.warning: - return - assert not tv.valid - else: - assert tv.valid - self.warn(tv) - - def runTest(self): - for tv in self.tv: - self.test_verify(tv) - - -class TestVectorsECDSAWycheproof(unittest.TestCase): - - def __init__(self, wycheproof_warnings, slow_tests): - unittest.TestCase.__init__(self) - self._wycheproof_warnings = wycheproof_warnings - self._slow_tests = slow_tests - self._id = "None" - - def add_tests(self, filename): - - def filter_ecc(group): - # These are the only curves we accept to skip - if group['key']['curve'] in ('secp224k1', 'secp256k1', - 'brainpoolP224r1', 'brainpoolP224t1', - 'brainpoolP256r1', 'brainpoolP256t1', - 'brainpoolP320r1', 'brainpoolP320t1', - 'brainpoolP384r1', 'brainpoolP384t1', - 'brainpoolP512r1', 'brainpoolP512t1', - ): - return None - return ECC.import_key(group['keyPem']) - - def filter_sha(group): - return get_hash_module(group['sha']) - - def filter_encoding(group): - encoding_name = group['type'] - if encoding_name == "EcdsaVerify": - return "der" - elif encoding_name == "EcdsaP1363Verify": - return "binary" - else: - raise ValueError("Unknown signature type " + encoding_name) - - result = load_test_vectors_wycheproof(("Signature", "wycheproof"), - filename, - "Wycheproof ECDSA signature (%s)" % filename, - 
group_tag={'key': filter_ecc, - 'hash_module': filter_sha, - 'encoding': filter_encoding, - }) - self.tv += result - - def setUp(self): - self.tv = [] - self.add_tests("ecdsa_secp224r1_sha224_p1363_test.json") - self.add_tests("ecdsa_secp224r1_sha224_test.json") - if self._slow_tests: - self.add_tests("ecdsa_secp224r1_sha256_p1363_test.json") - self.add_tests("ecdsa_secp224r1_sha256_test.json") - self.add_tests("ecdsa_secp224r1_sha3_224_test.json") - self.add_tests("ecdsa_secp224r1_sha3_256_test.json") - self.add_tests("ecdsa_secp224r1_sha3_512_test.json") - self.add_tests("ecdsa_secp224r1_sha512_p1363_test.json") - self.add_tests("ecdsa_secp224r1_sha512_test.json") - self.add_tests("ecdsa_secp256r1_sha256_p1363_test.json") - self.add_tests("ecdsa_secp256r1_sha256_test.json") - self.add_tests("ecdsa_secp256r1_sha3_256_test.json") - self.add_tests("ecdsa_secp256r1_sha3_512_test.json") - self.add_tests("ecdsa_secp256r1_sha512_p1363_test.json") - self.add_tests("ecdsa_secp256r1_sha512_test.json") - if self._slow_tests: - self.add_tests("ecdsa_secp384r1_sha3_384_test.json") - self.add_tests("ecdsa_secp384r1_sha3_512_test.json") - self.add_tests("ecdsa_secp384r1_sha384_p1363_test.json") - self.add_tests("ecdsa_secp384r1_sha384_test.json") - self.add_tests("ecdsa_secp384r1_sha512_p1363_test.json") - self.add_tests("ecdsa_secp384r1_sha512_test.json") - if self._slow_tests: - self.add_tests("ecdsa_secp521r1_sha3_512_test.json") - self.add_tests("ecdsa_secp521r1_sha512_p1363_test.json") - self.add_tests("ecdsa_secp521r1_sha512_test.json") - self.add_tests("ecdsa_test.json") - self.add_tests("ecdsa_webcrypto_test.json") - - def shortDescription(self): - return self._id - - def warn(self, tv): - if tv.warning and self._wycheproof_warnings: - import warnings - warnings.warn("Wycheproof warning: %s (%s)" % (self._id, tv.comment)) - - def test_verify(self, tv): - self._id = "Wycheproof ECDSA Test #%d (%s, %s)" % (tv.id, tv.comment, tv.filename) - - # Skip tests with unsupported curves - if tv.key is None: - return - - hashed_msg = tv.hash_module.new(tv.msg) - signer = DSS.new(tv.key, 'fips-186-3', encoding=tv.encoding) - try: - signature = signer.verify(hashed_msg, tv.sig) - except ValueError as e: - if tv.warning: - return - if tv.comment == "k*G has a large x-coordinate": - return - assert not tv.valid - else: - assert tv.valid - self.warn(tv) - - def runTest(self): - for tv in self.tv: - self.test_verify(tv) - - -def get_tests(config={}): - wycheproof_warnings = config.get('wycheproof_warnings') - - tests = [] - tests += list_test_cases(FIPS_DSA_Tests) - tests += list_test_cases(FIPS_ECDSA_Tests) - tests += list_test_cases(Det_DSA_Tests) - tests += list_test_cases(Det_ECDSA_Tests) - - slow_tests = config.get('slow_tests') - if slow_tests: - tests += list_test_cases(FIPS_DSA_Tests_KAT) - tests += list_test_cases(FIPS_ECDSA_Tests_KAT) - - tests += [TestVectorsDSAWycheproof(wycheproof_warnings, slow_tests)] - tests += [TestVectorsECDSAWycheproof(wycheproof_warnings, slow_tests)] - - return tests - - -if __name__ == '__main__': - def suite(): - return unittest.TestSuite(get_tests()) - unittest.main(defaultTest='suite') diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/common/tree/base.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/common/tree/base.py deleted file mode 100644 index 3859e4b239abda857052cd63f15b6de340136df6..0000000000000000000000000000000000000000 --- 
a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/gpt_index/indices/common/tree/base.py +++ /dev/null @@ -1,198 +0,0 @@ -"""Common classes/functions for tree index operations.""" - - -import asyncio -import logging -from typing import Dict, List, Sequence, Tuple - -from gpt_index.async_utils import run_async_tasks -from gpt_index.data_structs.data_structs import IndexGraph, Node -from gpt_index.indices.node_utils import get_text_splits_from_document -from gpt_index.indices.prompt_helper import PromptHelper -from gpt_index.indices.utils import get_sorted_node_list -from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor -from gpt_index.langchain_helpers.text_splitter import TextSplitter -from gpt_index.prompts.prompts import SummaryPrompt -from gpt_index.schema import BaseDocument - - -class GPTTreeIndexBuilder: - """GPT tree index builder. - - Helper class to build the tree-structured index, - or to synthesize an answer. - - """ - - def __init__( - self, - num_children: int, - summary_prompt: SummaryPrompt, - llm_predictor: LLMPredictor, - prompt_helper: PromptHelper, - text_splitter: TextSplitter, - use_async: bool = False, - ) -> None: - """Initialize with params.""" - if num_children < 2: - raise ValueError("Invalid number of children.") - self.num_children = num_children - self.summary_prompt = summary_prompt - self._llm_predictor = llm_predictor - self._prompt_helper = prompt_helper - self._text_splitter = text_splitter - self._use_async = use_async - - def _get_nodes_from_document( - self, start_idx: int, document: BaseDocument - ) -> Dict[int, Node]: - """Add document to index.""" - # NOTE: summary prompt does not need to be partially formatted - text_splits = get_text_splits_from_document( - document=document, text_splitter=self._text_splitter - ) - text_chunks = [text_split.text_chunk for text_split in text_splits] - doc_nodes = { - (start_idx + i): Node( - text=t, - index=(start_idx + i), - ref_doc_id=document.get_doc_id(), - embedding=document.embedding, - extra_info=document.extra_info, - ) - for i, t in enumerate(text_chunks) - } - return doc_nodes - - def build_from_text( - self, - documents: Sequence[BaseDocument], - build_tree: bool = True, - ) -> IndexGraph: - """Build from text. 
- - Returns: - IndexGraph: graph object consisting of all_nodes, root_nodes - - """ - all_nodes: Dict[int, Node] = {} - for d in documents: - all_nodes.update(self._get_nodes_from_document(len(all_nodes), d)) - - if build_tree: - # instantiate all_nodes from initial text chunks - root_nodes = self.build_index_from_nodes(all_nodes, all_nodes) - else: - # if build_tree is False, then don't surface any root nodes - root_nodes = {} - return IndexGraph(all_nodes=all_nodes, root_nodes=root_nodes) - - def _prepare_node_and_text_chunks( - self, cur_nodes: Dict[int, Node] - ) -> Tuple[List[int], List[List[Node]], List[str]]: - """Prepare node and text chunks.""" - cur_node_list = get_sorted_node_list(cur_nodes) - logging.info( - f"> Building index from nodes: {len(cur_nodes) // self.num_children} chunks" - ) - indices, cur_nodes_chunks, text_chunks = [], [], [] - for i in range(0, len(cur_node_list), self.num_children): - cur_nodes_chunk = cur_node_list[i : i + self.num_children] - text_chunk = self._prompt_helper.get_text_from_nodes( - cur_nodes_chunk, prompt=self.summary_prompt - ) - indices.append(i) - cur_nodes_chunks.append(cur_nodes_chunk) - text_chunks.append(text_chunk) - return indices, cur_nodes_chunks, text_chunks - - def _construct_parent_nodes( - self, - cur_index: int, - indices: List[int], - cur_nodes_chunks: List[List[Node]], - summaries: List[str], - ) -> Dict[int, Node]: - """Construct parent nodes.""" - new_node_dict = {} - for i, cur_nodes_chunk, new_summary in zip( - indices, cur_nodes_chunks, summaries - ): - logging.debug( - f"> {i}/{len(cur_nodes_chunk)}, " - "summary: {truncate_text(new_summary, 50)}" - ) - new_node = Node( - text=new_summary, - index=cur_index, - child_indices={n.index for n in cur_nodes_chunk}, - ) - new_node_dict[cur_index] = new_node - cur_index += 1 - return new_node_dict - - def build_index_from_nodes( - self, - cur_nodes: Dict[int, Node], - all_nodes: Dict[int, Node], - ) -> Dict[int, Node]: - """Consolidates chunks recursively, in a bottoms-up fashion.""" - cur_index = len(all_nodes) - indices, cur_nodes_chunks, text_chunks = self._prepare_node_and_text_chunks( - cur_nodes - ) - - if self._use_async: - tasks = [ - self._llm_predictor.apredict( - self.summary_prompt, context_str=text_chunk - ) - for text_chunk in text_chunks - ] - outputs: List[Tuple[str, str]] = run_async_tasks(tasks) - summaries = [output[0] for output in outputs] - else: - summaries = [ - self._llm_predictor.predict( - self.summary_prompt, context_str=text_chunk - )[0] - for text_chunk in text_chunks - ] - - new_node_dict = self._construct_parent_nodes( - cur_index, indices, cur_nodes_chunks, summaries - ) - all_nodes.update(new_node_dict) - - if len(new_node_dict) <= self.num_children: - return new_node_dict - else: - return self.build_index_from_nodes(new_node_dict, all_nodes) - - async def abuild_index_from_nodes( - self, - cur_nodes: Dict[int, Node], - all_nodes: Dict[int, Node], - ) -> Dict[int, Node]: - """Consolidates chunks recursively, in a bottoms-up fashion.""" - cur_index = len(all_nodes) - indices, cur_nodes_chunks, text_chunks = self._prepare_node_and_text_chunks( - cur_nodes - ) - - tasks = [ - self._llm_predictor.apredict(self.summary_prompt, context_str=text_chunk) - for text_chunk in text_chunks - ] - outputs: List[Tuple[str, str]] = await asyncio.gather(*tasks) - summaries = [output[0] for output in outputs] - - new_node_dict = self._construct_parent_nodes( - cur_index, indices, cur_nodes_chunks, summaries - ) - all_nodes.update(new_node_dict) - - if 
len(new_node_dict) <= self.num_children: - return new_node_dict - else: - return self.build_index_from_nodes(new_node_dict, all_nodes) diff --git a/spaces/johnslegers/epic-diffusion-inference/app.py b/spaces/johnslegers/epic-diffusion-inference/app.py deleted file mode 100644 index 12d56ba46c931d370b152fa49983e79c77381a64..0000000000000000000000000000000000000000 --- a/spaces/johnslegers/epic-diffusion-inference/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/johnslegers/epic-diffusion").launch() \ No newline at end of file diff --git a/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_addtitletransform.py b/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_addtitletransform.py deleted file mode 100644 index d81cfaf703ec3ec8b6f63b705eac7fb42cfdb527..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/whisper-youtube-2-hf_dataset/test/test_addtitletransform.py +++ /dev/null @@ -1,19 +0,0 @@ -from youtube_transcriber.transforming.addtitletransform import AddTitleTransform -from youtube_transcriber.video import YoutubeVideo - -def test_add_title_transform_init(): - transform = AddTitleTransform() - assert type(transform) == AddTitleTransform - -def test_apply(): - transform = AddTitleTransform() - raw_video = YoutubeVideo(channel_name="Tquotes", - url="https://www.youtube.com/watch?v=NSkoGZ8J1Ag") - transformed_video = transform.apply(raw_video) - assert type(transformed_video) == YoutubeVideo - assert transformed_video.channel_name == raw_video.channel_name - assert transformed_video.url == raw_video.url - assert transformed_video.title == "Steve Jobs quotes Bob Dylan" - assert transformed_video.description == raw_video.description - assert transformed_video.transcription == raw_video.transcription - assert transformed_video.segments == raw_video.segments \ No newline at end of file diff --git a/spaces/juancopi81/youtube-music-transcribe/mt3/metrics.py b/spaces/juancopi81/youtube-music-transcribe/mt3/metrics.py deleted file mode 100644 index 42f2358f187fbb9a4213310b2e054ab6fc85405d..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/mt3/metrics.py +++ /dev/null @@ -1,392 +0,0 @@ -# Copyright 2022 The MT3 Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Transcription metrics.""" - -import collections -import copy -import functools -from typing import Any, Iterable, Mapping, Optional, Sequence - -import mir_eval - -from mt3 import event_codec -from mt3 import metrics_utils -from mt3 import note_sequences -from mt3 import spectrograms -from mt3 import summaries -from mt3 import vocabularies - -import note_seq -import numpy as np -import seqio - - -def _program_aware_note_scores( - ref_ns: note_seq.NoteSequence, - est_ns: note_seq.NoteSequence, - granularity_type: str -) -> Mapping[str, float]: - """Compute precision/recall/F1 for notes taking program into account. - - For non-drum tracks, uses onsets and offsets. For drum tracks, uses onsets - only. 
Applies MIDI program map of specified granularity type. - - Args: - ref_ns: Reference NoteSequence with ground truth labels. - est_ns: Estimated NoteSequence. - granularity_type: String key in vocabularies.PROGRAM_GRANULARITIES dict. - - Returns: - A dictionary containing precision, recall, and F1 score. - """ - program_map_fn = vocabularies.PROGRAM_GRANULARITIES[ - granularity_type].program_map_fn - - ref_ns = copy.deepcopy(ref_ns) - for note in ref_ns.notes: - if not note.is_drum: - note.program = program_map_fn(note.program) - - est_ns = copy.deepcopy(est_ns) - for note in est_ns.notes: - if not note.is_drum: - note.program = program_map_fn(note.program) - - program_and_is_drum_tuples = ( - set((note.program, note.is_drum) for note in ref_ns.notes) | - set((note.program, note.is_drum) for note in est_ns.notes) - ) - - drum_precision_sum = 0.0 - drum_precision_count = 0 - drum_recall_sum = 0.0 - drum_recall_count = 0 - - nondrum_precision_sum = 0.0 - nondrum_precision_count = 0 - nondrum_recall_sum = 0.0 - nondrum_recall_count = 0 - - for program, is_drum in program_and_is_drum_tuples: - est_track = note_sequences.extract_track(est_ns, program, is_drum) - ref_track = note_sequences.extract_track(ref_ns, program, is_drum) - - est_intervals, est_pitches, unused_est_velocities = ( - note_seq.sequences_lib.sequence_to_valued_intervals(est_track)) - ref_intervals, ref_pitches, unused_ref_velocities = ( - note_seq.sequences_lib.sequence_to_valued_intervals(ref_track)) - - args = { - 'ref_intervals': ref_intervals, 'ref_pitches': ref_pitches, - 'est_intervals': est_intervals, 'est_pitches': est_pitches - } - if is_drum: - args['offset_ratio'] = None - - precision, recall, unused_f_measure, unused_avg_overlap_ratio = ( - mir_eval.transcription.precision_recall_f1_overlap(**args)) - - if is_drum: - drum_precision_sum += precision * len(est_intervals) - drum_precision_count += len(est_intervals) - drum_recall_sum += recall * len(ref_intervals) - drum_recall_count += len(ref_intervals) - else: - nondrum_precision_sum += precision * len(est_intervals) - nondrum_precision_count += len(est_intervals) - nondrum_recall_sum += recall * len(ref_intervals) - nondrum_recall_count += len(ref_intervals) - - precision_sum = drum_precision_sum + nondrum_precision_sum - precision_count = drum_precision_count + nondrum_precision_count - recall_sum = drum_recall_sum + nondrum_recall_sum - recall_count = drum_recall_count + nondrum_recall_count - - precision = (precision_sum / precision_count) if precision_count else 0 - recall = (recall_sum / recall_count) if recall_count else 0 - f_measure = mir_eval.util.f_measure(precision, recall) - - drum_precision = ((drum_precision_sum / drum_precision_count) - if drum_precision_count else 0) - drum_recall = ((drum_recall_sum / drum_recall_count) - if drum_recall_count else 0) - drum_f_measure = mir_eval.util.f_measure(drum_precision, drum_recall) - - nondrum_precision = ((nondrum_precision_sum / nondrum_precision_count) - if nondrum_precision_count else 0) - nondrum_recall = ((nondrum_recall_sum / nondrum_recall_count) - if nondrum_recall_count else 0) - nondrum_f_measure = mir_eval.util.f_measure(nondrum_precision, nondrum_recall) - - return { - f'Onset + offset + program precision ({granularity_type})': precision, - f'Onset + offset + program recall ({granularity_type})': recall, - f'Onset + offset + program F1 ({granularity_type})': f_measure, - f'Drum onset precision ({granularity_type})': drum_precision, - f'Drum onset recall ({granularity_type})': drum_recall, - 
f'Drum onset F1 ({granularity_type})': drum_f_measure, - f'Nondrum onset + offset + program precision ({granularity_type})': - nondrum_precision, - f'Nondrum onset + offset + program recall ({granularity_type})': - nondrum_recall, - f'Nondrum onset + offset + program F1 ({granularity_type})': - nondrum_f_measure - } - - -def _note_onset_tolerance_sweep( - ref_ns: note_seq.NoteSequence, est_ns: note_seq.NoteSequence, - tolerances: Iterable[float] = (0.01, 0.02, 0.05, 0.1, 0.2, 0.5) -) -> Mapping[str, float]: - """Compute note precision/recall/F1 across a range of tolerances.""" - est_intervals, est_pitches, unused_est_velocities = ( - note_seq.sequences_lib.sequence_to_valued_intervals(est_ns)) - ref_intervals, ref_pitches, unused_ref_velocities = ( - note_seq.sequences_lib.sequence_to_valued_intervals(ref_ns)) - - scores = {} - - for tol in tolerances: - precision, recall, f_measure, _ = ( - mir_eval.transcription.precision_recall_f1_overlap( - ref_intervals=ref_intervals, ref_pitches=ref_pitches, - est_intervals=est_intervals, est_pitches=est_pitches, - onset_tolerance=tol, offset_min_tolerance=tol)) - - scores[f'Onset + offset precision ({tol})'] = precision - scores[f'Onset + offset recall ({tol})'] = recall - scores[f'Onset + offset F1 ({tol})'] = f_measure - - return scores - - -def transcription_metrics( - targets: Sequence[Mapping[str, Any]], - predictions: Sequence[Mapping[str, Any]], - codec: event_codec.Codec, - spectrogram_config: spectrograms.SpectrogramConfig, - onsets_only: bool, - use_ties: bool, - track_specs: Optional[Sequence[note_sequences.TrackSpec]] = None, - num_summary_examples: int = 5, - frame_fps: float = 62.5, - frame_velocity_threshold: int = 30, -) -> Mapping[str, seqio.metrics.MetricValue]: - """Compute mir_eval transcription metrics.""" - if onsets_only and use_ties: - raise ValueError('Ties not compatible with onset-only transcription.') - if onsets_only: - encoding_spec = note_sequences.NoteOnsetEncodingSpec - elif not use_ties: - encoding_spec = note_sequences.NoteEncodingSpec - else: - encoding_spec = note_sequences.NoteEncodingWithTiesSpec - - # The first target for each full example contains the NoteSequence; just - # organize by ID. - full_targets = {} - for target in targets: - if target['ref_ns']: - full_targets[target['unique_id']] = {'ref_ns': target['ref_ns']} - - # Gather all predictions for the same ID and concatenate them in time order, - # to construct full-length predictions. 
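Both scoring helpers above (`_program_aware_note_scores` and `_note_onset_tolerance_sweep`) ultimately reduce to mir_eval's interval/pitch matcher. As a stand-alone reference, a minimal sketch of that call is shown below; the interval and pitch values are made up, and the `pretty_midi` MIDI-to-Hz helper is an assumption for illustration, not an import of this module.

```python
# Illustrative sketch only, not part of the deleted module. Intervals are
# (onset, offset) pairs in seconds; mir_eval matches pitches within a tolerance
# expressed in cents, so MIDI note numbers are converted to Hz here.
import numpy as np
import mir_eval
import pretty_midi

ref_intervals = np.array([[0.0, 0.5], [0.5, 1.0]])
ref_pitches = pretty_midi.note_number_to_hz(np.array([60, 64]))
est_intervals = np.array([[0.02, 0.48], [0.5, 1.2]])
est_pitches = pretty_midi.note_number_to_hz(np.array([60, 64]))

# Onset-only scoring (offset_ratio=None), the variant used for drum tracks above.
precision, recall, f1, _ = mir_eval.transcription.precision_recall_f1_overlap(
    ref_intervals, ref_pitches, est_intervals, est_pitches, offset_ratio=None)
print(precision, recall, f1)
```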
- full_predictions = metrics_utils.combine_predictions_by_id( - predictions=predictions, - combine_predictions_fn=functools.partial( - metrics_utils.event_predictions_to_ns, - codec=codec, - encoding_spec=encoding_spec)) - - assert sorted(full_targets.keys()) == sorted(full_predictions.keys()) - - full_target_prediction_pairs = [ - (full_targets[id], full_predictions[id]) - for id in sorted(full_targets.keys()) - ] - - scores = collections.defaultdict(list) - all_track_pianorolls = collections.defaultdict(list) - for target, prediction in full_target_prediction_pairs: - scores['Invalid events'].append(prediction['est_invalid_events']) - scores['Dropped events'].append(prediction['est_dropped_events']) - - def remove_drums(ns): - ns_drumless = note_seq.NoteSequence() - ns_drumless.CopyFrom(ns) - del ns_drumless.notes[:] - ns_drumless.notes.extend([note for note in ns.notes if not note.is_drum]) - return ns_drumless - - est_ns_drumless = remove_drums(prediction['est_ns']) - ref_ns_drumless = remove_drums(target['ref_ns']) - - # Whether or not there are separate tracks, compute metrics for the full - # NoteSequence minus drums. - est_tracks = [est_ns_drumless] - ref_tracks = [ref_ns_drumless] - use_track_offsets = [not onsets_only] - use_track_velocities = [not onsets_only] - track_instrument_names = [''] - - if track_specs is not None: - # Compute transcription metrics separately for each track. - for spec in track_specs: - est_tracks.append(note_sequences.extract_track( - prediction['est_ns'], spec.program, spec.is_drum)) - ref_tracks.append(note_sequences.extract_track( - target['ref_ns'], spec.program, spec.is_drum)) - use_track_offsets.append(not onsets_only and not spec.is_drum) - use_track_velocities.append(not onsets_only) - track_instrument_names.append(spec.name) - - for est_ns, ref_ns, use_offsets, use_velocities, instrument_name in zip( - est_tracks, ref_tracks, use_track_offsets, use_track_velocities, - track_instrument_names): - track_scores = {} - - est_intervals, est_pitches, est_velocities = ( - note_seq.sequences_lib.sequence_to_valued_intervals(est_ns)) - - ref_intervals, ref_pitches, ref_velocities = ( - note_seq.sequences_lib.sequence_to_valued_intervals(ref_ns)) - - # Precision / recall / F1 using onsets (and pitches) only. - precision, recall, f_measure, avg_overlap_ratio = ( - mir_eval.transcription.precision_recall_f1_overlap( - ref_intervals=ref_intervals, - ref_pitches=ref_pitches, - est_intervals=est_intervals, - est_pitches=est_pitches, - offset_ratio=None)) - del avg_overlap_ratio - track_scores['Onset precision'] = precision - track_scores['Onset recall'] = recall - track_scores['Onset F1'] = f_measure - - if use_offsets: - # Precision / recall / F1 using onsets and offsets. - precision, recall, f_measure, avg_overlap_ratio = ( - mir_eval.transcription.precision_recall_f1_overlap( - ref_intervals=ref_intervals, - ref_pitches=ref_pitches, - est_intervals=est_intervals, - est_pitches=est_pitches)) - del avg_overlap_ratio - track_scores['Onset + offset precision'] = precision - track_scores['Onset + offset recall'] = recall - track_scores['Onset + offset F1'] = f_measure - - if use_velocities: - # Precision / recall / F1 using onsets and velocities (no offsets). 
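The call that follows uses mir_eval's `transcription_velocity` module, which additionally matches note velocities (rescaled and compared within a relative tolerance). A hedged, stand-alone sketch of that call with made-up values:

```python
# Illustrative only: velocity-aware note scoring. Pitches are in Hz; offsets are
# ignored (offset_ratio=None), mirroring the onset + velocity branch below.
import numpy as np
import mir_eval

ref_intervals = np.array([[0.0, 0.5], [0.6, 1.0]])
ref_pitches = np.array([440.0, 660.0])
ref_velocities = np.array([80, 100])
est_intervals = np.array([[0.02, 0.49], [0.6, 1.05]])
est_pitches = np.array([440.0, 660.0])
est_velocities = np.array([78, 102])

precision, recall, f1, _ = mir_eval.transcription_velocity.precision_recall_f1_overlap(
    ref_intervals, ref_pitches, ref_velocities,
    est_intervals, est_pitches, est_velocities,
    offset_ratio=None)
print(precision, recall, f1)
```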
- precision, recall, f_measure, avg_overlap_ratio = ( - mir_eval.transcription_velocity.precision_recall_f1_overlap( - ref_intervals=ref_intervals, - ref_pitches=ref_pitches, - ref_velocities=ref_velocities, - est_intervals=est_intervals, - est_pitches=est_pitches, - est_velocities=est_velocities, - offset_ratio=None)) - track_scores['Onset + velocity precision'] = precision - track_scores['Onset + velocity recall'] = recall - track_scores['Onset + velocity F1'] = f_measure - - if use_offsets and use_velocities: - # Precision / recall / F1 using onsets, offsets, and velocities. - precision, recall, f_measure, avg_overlap_ratio = ( - mir_eval.transcription_velocity.precision_recall_f1_overlap( - ref_intervals=ref_intervals, - ref_pitches=ref_pitches, - ref_velocities=ref_velocities, - est_intervals=est_intervals, - est_pitches=est_pitches, - est_velocities=est_velocities)) - track_scores['Onset + offset + velocity precision'] = precision - track_scores['Onset + offset + velocity recall'] = recall - track_scores['Onset + offset + velocity F1'] = f_measure - - # Calculate framewise metrics. - is_drum = all([n.is_drum for n in ref_ns.notes]) - ref_pr = metrics_utils.get_prettymidi_pianoroll( - ref_ns, frame_fps, is_drum=is_drum) - est_pr = metrics_utils.get_prettymidi_pianoroll( - est_ns, frame_fps, is_drum=is_drum) - all_track_pianorolls[instrument_name].append((est_pr, ref_pr)) - frame_precision, frame_recall, frame_f1 = metrics_utils.frame_metrics( - ref_pr, est_pr, velocity_threshold=frame_velocity_threshold) - track_scores['Frame Precision'] = frame_precision - track_scores['Frame Recall'] = frame_recall - track_scores['Frame F1'] = frame_f1 - - for metric_name, metric_value in track_scores.items(): - if instrument_name: - scores[f'{instrument_name}/{metric_name}'].append(metric_value) - else: - scores[metric_name].append(metric_value) - - # Add program-aware note metrics for all program granularities. - # Note that this interacts with the training program granularity; in - # particular granularities *higher* than the training granularity are likely - # to have poor metrics. - for granularity_type in vocabularies.PROGRAM_GRANULARITIES: - for name, score in _program_aware_note_scores( - target['ref_ns'], prediction['est_ns'], - granularity_type=granularity_type).items(): - scores[name].append(score) - - # Add (non-program-aware) note metrics across a range of onset/offset - # tolerances. - for name, score in _note_onset_tolerance_sweep( - ref_ns=ref_ns_drumless, est_ns=est_ns_drumless).items(): - scores[name].append(score) - - mean_scores = {k: np.mean(v) for k, v in scores.items()} - - score_histograms = {'%s (hist)' % k: seqio.metrics.Histogram(np.array(v)) - for k, v in scores.items()} - - # Pick several examples to summarize. - targets_to_summarize, predictions_to_summarize = zip( - *full_target_prediction_pairs[:num_summary_examples]) - - # Compute audio summaries. - audio_summaries = summaries.audio_summaries( - targets=targets_to_summarize, - predictions=predictions_to_summarize, - spectrogram_config=spectrogram_config) - - # Compute transcription summaries. 
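The framewise scores computed above come from `metrics_utils.frame_metrics`, which is not part of this diff. As a hedged sketch only, one common definition compares velocity-thresholded time-pitch activity grids, assuming both pianorolls already share the same shape (any padding or length alignment done by the real helper is elided here):

```python
# Hedged sketch, not the mt3 implementation: framewise precision/recall/F1 over
# two pianoroll arrays of identical shape (time x pitch), thresholded on velocity.
import numpy as np

def frame_prf(ref_roll: np.ndarray, est_roll: np.ndarray, velocity_threshold: int = 30):
    ref = ref_roll > velocity_threshold
    est = est_roll > velocity_threshold
    true_positives = np.logical_and(ref, est).sum()
    precision = true_positives / max(est.sum(), 1)
    recall = true_positives / max(ref.sum(), 1)
    f1 = 2 * precision * recall / max(precision + recall, 1e-8)
    return precision, recall, f1
```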
- transcription_summaries = summaries.transcription_summaries( - targets=targets_to_summarize, - predictions=predictions_to_summarize, - spectrogram_config=spectrogram_config, - ns_feature_suffix='ns', - track_specs=track_specs) - - pianorolls_to_summarize = { - k: v[:num_summary_examples] for k, v in all_track_pianorolls.items() - } - - prettymidi_pianoroll_summaries = summaries.prettymidi_pianoroll( - pianorolls_to_summarize, fps=frame_fps) - - return { - **mean_scores, - **score_histograms, - **audio_summaries, - **transcription_summaries, - **prettymidi_pianoroll_summaries, - } diff --git a/spaces/juancopi81/youtube-music-transcribe/t5x/optimizers.py b/spaces/juancopi81/youtube-music-transcribe/t5x/optimizers.py deleted file mode 100644 index 5ec7778e346495a07f413667f8dff00a02725ecf..0000000000000000000000000000000000000000 --- a/spaces/juancopi81/youtube-music-transcribe/t5x/optimizers.py +++ /dev/null @@ -1,706 +0,0 @@ -# Copyright 2022 The T5X Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""T5X Optimizer Support. - -Tools for wrapping Optax optimizers and handling SPMD annotations for use with -pjit. - -Additional support for the legacy Adafactor implementation. -""" - -import functools -from typing import Any, Optional, Union, Sequence, Tuple - -import flax -from flax import optim # just used for transitional type definitions -from flax import serialization -from flax import struct -from flax import traverse_util -from flax.core import frozen_dict -from flax.serialization import from_state_dict -from flax.serialization import to_state_dict -import jax -import jax.numpy as jnp -import optax - -freeze = flax.core.frozen_dict.freeze -unfreeze = flax.core.frozen_dict.unfreeze - -Dtype = Any - - -@struct.dataclass -class OptimizerState: - step: jnp.ndarray - param_states: Any - - -class OptimizerDef: - """Base class for an optimizer definition.""" - - def __init__(self, hyper_params): - self.hyper_params = hyper_params - - def apply_gradient(self, hyper_params, params, state, grads): - """Applies a gradient for a set of parameters.""" - raise NotImplementedError() - - def init_state(self, params): - raise NotImplementedError() - - def update_hyper_params(self, **hyper_param_overrides): - """Updates the hyper parameters with a set of overrides. - - Args: - **hyper_param_overrides: the hyper parameters updates will override the - defaults specified in the `OptimizerDef`. Pass `hyper_params=...` to - replace all hyper parameters. - - Returns: - The new hyper parameters. - """ - hp = hyper_param_overrides.pop('hyper_params', self.hyper_params) - if hyper_param_overrides: - hp = hp.replace(**hyper_param_overrides) - return hp - - def create(self, target): - """Creates a new optimizer for the given target. - - Args: - target: the object to be optimized. This is typically a variable dict - returned by `flax.linen.Module.init()`, but it can also be a container - of variables dicts, e.g. `(v1, v2)` and `('var1': v1, 'var2': v2)` are - valid inputs as well. 
- - Returns: - An instance of `Optimizer`. - """ - opt_def = self - state = opt_def.init_state(target) - return Optimizer(opt_def, state, target) - - def state_dict(self, target, state): - return to_state_dict({ - 'target': to_state_dict(target), - 'state': to_state_dict(state) - }) - - def restore_state(self, opt_target, opt_state, state_dict): - """Restore the optimizer target and state from the state dict. - - Args: - opt_target: the optimizer target. - opt_state: the optimizer state. - state_dict: the state dict containing the desired new state of the - optimizer. - - Returns: - a tuple of the optimizer target and state with the restored values from - the state dict. - """ - - opt_target = from_state_dict(opt_target, state_dict['target']) - opt_state = from_state_dict(opt_state, state_dict['state']) - return opt_target, opt_state - - -class Optimizer(struct.PyTreeNode): - """Legacy flax optimizer class. - - Optimizer carries the target and optimizer state. The optimizer is updated - using the method apply_gradient. - - Attributes: - optimizer_def: The optimizer definition. - state: The initial state of the optimizer. - target: The target to optimizer. - """ - - optimizer_def: OptimizerDef = struct.field(pytree_node=False) - state: Any = struct.field(pytree_node=True) - target: Any = struct.field(pytree_node=True) - - def apply_gradient(self, grads, **hyper_param_overrides): - """Applies a pytree of gradients to the target. - - Args: - grads: A pytree of gradients. - **hyper_param_overrides: the hyper parameters passed to apply_gradient - will override the defaults specified in the `OptimizerDef`. Pass - `hyper_params=...` to replace all hyper parameters. - - Returns: - A new optimizer with the updated target and state. - """ - hyper_params = self.optimizer_def.update_hyper_params( - **hyper_param_overrides) - new_target, new_state = self.optimizer_def.apply_gradient( - hyper_params, self.target, self.state, grads) - return self.replace(target=new_target, state=new_state) - - def state_dict(self): - return self.optimizer_def.state_dict(self.target, self.state) - - def restore_state(self, state): - target, state = self.optimizer_def.restore_state(self.target, self.state, - state) - return self.replace(target=target, state=state) - - -# Transitional Type Definitions - -OptimizerType = Union[optim.Optimizer, Optimizer] -OptimizerStateType = Union[optim.OptimizerState, OptimizerState] -OptimizerDefType = Union[optim.OptimizerDef, OptimizerDef] - -# Optax Elementwise Wrapper - - -class OptaxStatePartitionRules: - """Collection of rules to partition optax states. - - These rules work for optimizers whose states are simply replications of - params, e.g., Adam. Optimizers that aim to save memory by factoring states, - e.g., Adafactor, SM3, are not supported currently. - """ - - # Rules mapping a particular optax state to a callable returning the state - # with arrays replaced by t5x PartitionSpec or None. - # - # NOTE(levskaya): This is not an entirely exhaustive list, add to this list - # to support additional optimizers / transformations. 
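The rules table that follows relies on the fact that most optax states simply replicate the parameter tree, so the parameter partition axes can be reused verbatim. A quick illustrative check with `optax.adam` (not part of the deleted module, values made up):

```python
# Illustrative sketch: optax.adam's first moment / second moment accumulators
# mirror the parameter pytree, which is why ScaleByAdamState can take the
# parameter axes directly in the rules below.
import jax
import jax.numpy as jnp
import optax

params = {'dense': {'kernel': jnp.zeros((4, 8)), 'bias': jnp.zeros((8,))}}
state = optax.adam(1e-3).init(params)

print(type(state[0]).__name__)  # ScaleByAdamState
print(jax.tree_util.tree_structure(state[0].mu) ==
      jax.tree_util.tree_structure(params))  # True: same tree structure as params
```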
- # - # pylint: disable=g-long-lambda - - _RULES = { - - # Leaf Optax States: - optax.AddNoiseState: - lambda state, params_axes: optax.AddNoiseState( - count=None, rng_key=None), - optax.DifferentiallyPrivateAggregateState: - lambda state, params_axes: optax.DifferentiallyPrivateAggregateState( - rng_key=None), - optax.EmaState: - lambda state, params_axes: optax.EmaState( - count=None, ema=params_axes), - optax.EmptyState: - lambda state, params_axes: optax.EmptyState(), - optax.TraceState: - lambda state, params_axes: optax.TraceState(trace=params_axes), - optax.ScaleByAdamState: - lambda state, params_axes: optax.ScaleByAdamState( - count=None, mu=params_axes, nu=params_axes), - optax.ScaleByBeliefState: - lambda state, params_axes: optax.ScaleByBeliefState( - count=None, mu=params_axes, nu=params_axes), - optax.ScaleByRssState: - lambda state, params_axes: optax.ScaleByRssState( - sum_of_squares=params_axes), - optax.ScaleByRmsState: - lambda state, params_axes: optax.ScaleByRmsState(nu=params_axes), - optax.ScaleByRStdDevState: - lambda state, params_axes: optax.ScaleByRStdDevState( - mu=params_axes, nu=params_axes), - optax.ScaleBySM3State: - lambda state, params_axes: optax.ScaleBySM3State( - mu=params_axes, nu=params_axes), - optax.ScaleByTrustRatioState: - lambda state, params_axes: optax.ScaleByTrustRatioState(), - optax.ScaleByScheduleState: - lambda state, params_axes: optax.ScaleByScheduleState(count=None), - optax.ScaleByFromageState: - lambda state, params_axes: optax.ScaleByFromageState(count=None), - optax.ZeroNansState: - lambda state, params_axes: optax.ZeroNansState(found_nan=None), - # FactoredState - - # Recursive, Combinator Optax States: - - # MaskedState - optax.MaskedState: - lambda state, params_axes: optax.MaskedState( - inner_state=OptaxStatePartitionRules.derive_optax_logical_axes( - state.inner_state, params_axes)), - optax.InjectHyperparamsState: - lambda state, params_axes: optax.InjectHyperparamsState( - count=None, - hyperparams=jax.tree_map(lambda x: None, state.hyperparams), - inner_state=OptaxStatePartitionRules.derive_optax_logical_axes( - state.inner_state, params_axes)), - optax.MultiStepsState: - lambda state, params_axes: optax.MultiStepsState( - mini_step=None, - gradient_step=None, - inner_opt_state=OptaxStatePartitionRules. - derive_optax_logical_axes( # pylint: disable=line-too-long - state.inner_opt_state, params_axes), - acc_grads=params_axes), - optax.ApplyIfFiniteState: - lambda state, params_axes: optax.ApplyIfFiniteState( - notfinite_count=None, - last_finite=None, - total_notfinite=None, - inner_state=OptaxStatePartitionRules.derive_optax_logical_axes( - state.inner_state, params_axes)), - optax.MaybeUpdateState: - lambda state, params_axes: optax.MaybeUpdateState( - inner_state=OptaxStatePartitionRules.derive_optax_logical_axes( - state.inner_state, params_axes), - step=None), - optax.MultiTransformState: - lambda state, params_axes: optax.MultiTransformState( - inner_states=OptaxStatePartitionRules.derive_optax_logical_axes( - state.inner_states, params_axes)), - # LookaheadState - # SplitRealAndImaginaryState - } - # pylint: enable=g-long-lambda - - @classmethod - def _is_optax_state(cls, x): - """Returns true if an object is an optax state. - - Note that in optax states are simply derived from NamedTuple, so we have to - do some hacky name matching. - - Args: - x: object. - - Returns: - True if x is an optax state. - """ - # A solution from stack overflow. Note that isinstance(x, NamedTuple) would - # not work. 
- is_named_tuple = ( - isinstance(x, tuple) and hasattr(x, '_asdict') and - hasattr(x, '_fields')) - result = is_named_tuple and type(x).__name__.endswith('State') - return result - - @classmethod - def derive_optax_logical_axes(cls, optax_state, params_axes): - """Derived logical axes for optax state.""" - # Flatten the optax state but do not go into the registered states. - flattened_state, tree_def = jax.tree_flatten( - optax_state, is_leaf=cls._is_optax_state) - - def derive_fn(x): - if type(x) not in cls._RULES: - if cls._is_optax_state(x): - raise ValueError( - f'Encountered unregistered optax state type {type(x).__name__}') - return None - return cls._RULES[type(x)](x, params_axes) - - flattened_axes = [derive_fn(x) for x in flattened_state] - derived_axes = jax.tree_unflatten(tree_def, flattened_axes) - return derived_axes - - -@struct.dataclass -class _OptaxWrapperHyperParams: - """Dummy hyper params struct, not used.""" - # Required by t5x trainer. Unused as learning rate scheduling is done using - # optax.Schedule. - learning_rate: Optional[float] = None - - -class OptaxWrapper(OptimizerDef): - """Wrapper to make optax optimizer compatible with T5X.""" - - def __init__(self, optax_optimizer: optax.GradientTransformation): - """Initializer. - - Args: - optax_optimizer: An optax optimizer. - """ - self.optax_optimizer = optax_optimizer - super().__init__(hyper_params=_OptaxWrapperHyperParams()) - - def init_state(self, params): - """Create initial state based on the params to optimize. - - Args: - params: PyTree of parameters to optimize. - - Returns: - Initial optimizer state. - """ - state = OptimizerState( - step=0, param_states=self.optax_optimizer.init(params)) - return state - - def apply_gradient(self, hyper_params, params, state, grads): - """Applies gradient. - - Args: - hyper_params: Unused hyper parameters. - params: PyTree of the parameters. - state: A named tuple containing the state of the optimizer. - grads: PyTree of the gradients for the parameters. - - Returns: - A tuple containing the new parameters and the new optimizer state. - """ - del hyper_params - - updates, new_optax_state = self.optax_optimizer.update( - grads, state.param_states, params) - new_params = optax.apply_updates(params, updates) - return new_params, OptimizerState( - step=state.step + 1, param_states=new_optax_state) - - def derive_logical_axes(self, optimizer, param_logical_axes): - """Derives optimizer state logical axes from params logical axes. - - Args: - optimizer: `optimizers.Optimizer` instance. - param_logical_axes: A PyTree where each leaf is a t5x PartitionSpec. - - Returns: - An `optimizers.Optimizer` instance, with all the leafs replaced by t5x - PartitionSpec or None (no partition). - """ - optimizer_logical_axes = jax.tree_map(lambda x: None, - optimizer.state_dict()) - optimizer_logical_axes['target'] = param_logical_axes - - optax_state_axes = OptaxStatePartitionRules.derive_optax_logical_axes( - optimizer.state.param_states, param_logical_axes) - - optimizer_logical_axes['state']['param_states'] = ( - serialization.to_state_dict(optax_state_axes)) - - return optimizer.restore_state(frozen_dict.unfreeze(optimizer_logical_axes)) - - def state_dict(self, target, state): - """Override state dict function. - - We need to override this function because many optax transformations use - `optax.EmptyState`, which produces empty dict in the state dict. This causes - the T5 training loop to fail in multiple places. 
As a remedy, we will - filter out the generated state dict so that there are no empty dict in the - output. - - The restore_state function is also overridden to reconstruct those empty - dict. - - Args: - target: Pytree of target variables. - state: Pytree of optimizer state. - - Returns: - A nested state. - """ - state_dict = to_state_dict(state) - - # This step removes any empty dict (recursively) in the state dict. - state_dict = traverse_util.unflatten_dict( - traverse_util.flatten_dict(state_dict, sep='/'), sep='/') - - return to_state_dict({ - 'target': to_state_dict(target), - 'state': state_dict, - }) - - def restore_state(self, opt_target, opt_state, state_dict): - """Override to restore empty dicts corresponding to `optax.EmptyState`. - - Args: - opt_target: the optimizer target. - opt_state: the optimizer state. - state_dict: the state dict containing the desired new state of the - optimizer. - - Returns: - a tuple of the optimizer target and state with the restored values from - the state dict. - """ - opt_target = from_state_dict(opt_target, state_dict['target']) - - # Get all the possible keys in the reference optimizer state. - flat_ref_opt_state_dict = traverse_util.flatten_dict( - to_state_dict(opt_state), keep_empty_nodes=True, sep='/') - - flat_src_opt_state_dict = dict( - traverse_util.flatten_dict(state_dict['state'], sep='/')) - # Adding the empty paths back to flat_src_opt_state_dict. - for k, v in flat_ref_opt_state_dict.items(): - if k in flat_src_opt_state_dict: - continue - # The key is not in the input state dict, presumably because it - # corresponds to an empty dict. - if v != traverse_util.empty_node: - raise ValueError( - f'Failed to restore optimizer state, path {k} is not present ' - 'in the input optimizer state dict.') - flat_src_opt_state_dict[k] = v - - # Restore state from the enhanced state dict. - opt_state = from_state_dict( - opt_state, - traverse_util.unflatten_dict(flat_src_opt_state_dict, sep='/')) - return opt_target, opt_state - - -# Optax wrapper and elementary wrapped optax optimizers. - - -def wrap_optax_optimizer(optax_optimizer): - """Converts optax optimizer constructor to a wrapped T5X-compatible optimizer. - - Args: - optax_optimizer: an optax optimizer creation function that returns an optax - GradientTransformation. - - Returns: - A function that takes the same arguments as the original optax creation - function but instead returns a wrapped OptimizerDef-compatible interface for - using the optimizer with T5X. 
- """ - - @functools.wraps(optax_optimizer) - def wrapped_optimizer(*args, **kwargs) -> OptimizerDef: - return OptaxWrapper(optax_optimizer(*args, **kwargs)) - - return wrapped_optimizer - - -def chain( - transformations: Sequence[optax.GradientTransformation] -) -> optax.GradientTransformation: - return optax.chain(*transformations) - - -chain = wrap_optax_optimizer(chain) -adabelief = wrap_optax_optimizer(optax.adabelief) -adagrad = wrap_optax_optimizer(optax.adagrad) -adam = wrap_optax_optimizer(optax.adam) -adamw = wrap_optax_optimizer(optax.adamw) -fromage = wrap_optax_optimizer(optax.fromage) -lars = wrap_optax_optimizer(optax.lars) -lamb = wrap_optax_optimizer(optax.lamb) -noisy_sgd = wrap_optax_optimizer(optax.noisy_sgd) -radam = wrap_optax_optimizer(optax.radam) -rmsprop = wrap_optax_optimizer(optax.rmsprop) -sgd = wrap_optax_optimizer(optax.sgd) -yogi = wrap_optax_optimizer(optax.yogi) -dpsgd = wrap_optax_optimizer(optax.dpsgd) - -# Excluded optimizers: -# TODO(levskaya): add shampoo, sm3 -# We use our own generalized adafactor implementations. -# adafactor = wrap_optax_optimizer(optax.adafactor) -# We may use a more complete quantized implementation of SM3 -# sm3 = wrap_optax_optimizer(optax.sm3) - -# Inlined Legacy Generalized Multioptimizer - - -class _Marker: - """Used to mark unoptimized leaves.""" - - def __init__(self): - self._indices = [] - - -def _tree_of_paths(tree): - """Converts a (frozen) nested dictionary into a (frozen) dict of paths.""" - is_frozen = isinstance(tree, flax.core.frozen_dict.FrozenDict) - flat_tree = traverse_util.flatten_dict(unfreeze(tree)) - path_tree = traverse_util.unflatten_dict( - {k: '/'.join(k) for k in flat_tree.keys()}) - if is_frozen: - path_tree = freeze(path_tree) - return path_tree - - -def _subtree_from_traversal(traversal, tree): - """Creates a (frozen) tree subset given a traversal.""" - is_frozen = isinstance(tree, flax.core.frozen_dict.FrozenDict) - flat_tree = {} - for path, leaf in zip( - traversal.iterate(_tree_of_paths(tree)), traversal.iterate(tree)): - flat_tree[path] = leaf - new_tree = traverse_util.unflatten_dict( - {tuple(k.split('/')): v for k, v in flat_tree.items()}) - if is_frozen: - new_tree = freeze(new_tree) - return new_tree - - -def _update_subtree_of_traversal(traversal, tree, update): - """Updates a (frozen) tree's subset given a traversal and update subtree.""" - is_frozen = isinstance(tree, flax.core.frozen_dict.FrozenDict) - flat_tree = traverse_util.flatten_dict(unfreeze(tree)) - flat_tree = {'/'.join(k): v for k, v in flat_tree.items()} - for path, leaf in zip( - traversal.iterate(_tree_of_paths(update)), traversal.iterate(update)): - flat_tree[path] = leaf - nested_d = traverse_util.unflatten_dict( - {tuple(k.split('/')): v for k, v in flat_tree.items()}) - if is_frozen: - nested_d = freeze(nested_d) - return nested_d - - -class MultiOptimizer(OptimizerDef): - """Generalized Multioptimizer. - - NB: Although this is provided for legacy support, it is still quite general - and should work fine with wrapped optax optimizers. But do note that the more - canonical way of mixing multiple optimizers inside optax uses optax.masked or - optax.multi_transform instead. - - A MultiOptimizer is subclass of :class:`OptimizerDef` and useful for applying - separate optimizer algorithms to various subsets of the model parameters. - - The example below creates two optimizers using - :class:`flax.traverse_util.ModelParamTraversal`: - one to optimize ``kernel`` parameters and to optimize ``bias`` parameters. 
- Note each optimizer is created with a different learning rate:: - - kernels = traverse_util.ModelParamTraversal( - lambda path, _: 'kernel' in path) - biases = traverse_util.ModelParamTraversal(lambda path, _: 'bias' in path) - kernel_opt = optimizers.adam(learning_rate=0.01) - bias_opt = optimizers.adam(learning_rate=0.1) - opt_def = MultiOptimizer((kernels, kernel_opt), (biases, bias_opt)) - optimizer = opt_def.create(model) - - In order to train only a subset of the parameters, you can simply use a single - :class:`flax.traverse_util.ModelParamTraversal` instance. - - If you want to update the learning rates of both optimizers online with - different learning rate schedules, you should update the learning rates when - applying the gradient. In the following example, the second optimizer is not - doing any optimization during the first 1000 steps:: - - hparams = optimizer.optimizer_def.hyper_params - new_optimizer = optimizer.apply_gradient( - grads, - hyper_params=[ - hparams[0].replace(learning_rate=0.2), - hparams[1].replace(learning_rate=jnp.where(step < 1000, 0., lr)), - ]) - """ - - def __init__( - self, traversals_and_optimizers: Sequence[Tuple[traverse_util.Traversal, - OptimizerDef]]): - """Create a new MultiOptimizer. - - See docstring of :class:`MultiOptimizer` for more details. - - Args: - traversals_and_optimizers: pairs of flax.traverse_util.Traversal and - `optimizers.OptimizerDef` instances. - """ - traversals, sub_optimizers = zip(*traversals_and_optimizers) - hyper_params = [opt.hyper_params for opt in sub_optimizers] - super().__init__(hyper_params) - self.traversals = traversals - self.sub_optimizers = sub_optimizers - - def init_state(self, params): - param_states = jax.tree_map(lambda x: _Marker(), params) - overlap = False - for idx, traversal in enumerate(self.traversals): - for match in traversal.iterate(param_states): - match._indices.append(idx) # pylint: disable=protected-access - overlap |= len(match._indices) > 1 # pylint: disable=protected-access - if overlap: - raise ValueError( - 'Multiple optimizers match the same leaves : ' + - str(jax.tree_map(lambda match: match._indices, param_states))) # pylint: disable=protected-access - - param_states = jax.tree_map(lambda x: _Marker(), params) - for focus, opt_def in zip(self.traversals, self.sub_optimizers): - ps = _subtree_from_traversal(focus, params) - ss = opt_def.init_state(ps) - param_states = _update_subtree_of_traversal(focus, param_states, - ss.param_states) - # Update state to None when param is not optimized by any sub optimizer. - param_states = jax.tree_map( - lambda x: (None if isinstance(x, _Marker) else x), param_states) - return OptimizerState(jnp.asarray(0, dtype=jnp.int32), param_states) - - def apply_gradient(self, hyper_params, params, state, grads): - new_params = params - it = zip(self.traversals, self.sub_optimizers, hyper_params) - new_param_states = jax.tree_map(lambda x: _Marker(), params) - for focus, opt_def, hp in it: - ps = _subtree_from_traversal(focus, params) - gs = _subtree_from_traversal(focus, grads) - ss = _subtree_from_traversal(focus, state.param_states) - prev_ss = OptimizerState(state.step, ss) - new_ps, new_ss = opt_def.apply_gradient(hp, ps, prev_ss, gs) - new_params = _update_subtree_of_traversal(focus, new_params, new_ps) - new_param_states = _update_subtree_of_traversal(focus, new_param_states, - new_ss.param_states) - # Update state to None when param is not optimized by any sub optimizer. 
- new_param_states = jax.tree_map( - lambda x: (None if isinstance(x, _Marker) else x), new_param_states) - return new_params, OptimizerState(state.step + 1, new_param_states) - - def update_hyper_params(self, **hyper_param_overrides): - """Updates the hyper parameters with a set of overrides. - - This method is called from :meth:`Optimizer.apply_gradient` to create the - hyper parameters for a specific optimization step. - MultiOptimizer will apply the overrides for each sub optimizer. - - Args: - **hyper_param_overrides: the hyper parameters updates will override the - defaults specified in the `OptimizerDef`. Pass `hyper_params=...` to - replace all hyper parameters. - - Returns: - The new hyper parameters. - """ - hps = hyper_param_overrides.pop('hyper_params', self.hyper_params) - if hyper_param_overrides: - hps = [hp.replace(**hyper_param_overrides) for hp in hps] - return hps - - def set_param_axes(self, param_logical_axes): - """Derives factorization rules from model parameter logical axes.""" - for focus, opt_def in zip(self.traversals, self.sub_optimizers): - pla_subtree = _subtree_from_traversal(focus, param_logical_axes) - if hasattr(opt_def, 'set_param_axes'): - opt_def.set_param_axes(pla_subtree) - - def derive_logical_axes(self, optimizer, param_logical_axes): - """Derives optimizer logical partitioning from model logical partitions.""" - param_states = jax.tree_map(lambda x: _Marker(), - optimizer.state.param_states) - for focus, opt_def in zip(self.traversals, self.sub_optimizers): - if hasattr(opt_def, 'derive_logical_axes'): - ps = _subtree_from_traversal(focus, param_logical_axes) - ss = _subtree_from_traversal(focus, optimizer.state.param_states) - new_opt = opt_def.derive_logical_axes( - Optimizer(opt_def, OptimizerState(None, ss), ps), ps) - param_states = _update_subtree_of_traversal(focus, param_states, - new_opt.state.param_states) - # Update axes to None when param is not optimized by any sub optimizer. - param_states = jax.tree_map( - lambda x: (None if isinstance(x, _Marker) else x), param_states) - return Optimizer(optimizer.optimizer_def, - OptimizerState(None, param_states), param_logical_axes) - - # TODO(levskaya): add traversal handling for state_dict / restore_state - # this is required to make this work w. optax optimizers... diff --git a/spaces/justest/gpt4free/g4f/.v1/gpt4free/aiassist/README.md b/spaces/justest/gpt4free/g4f/.v1/gpt4free/aiassist/README.md deleted file mode 100644 index b61017841d3c52b8cd079e638b1fa35264aa15af..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/.v1/gpt4free/aiassist/README.md +++ /dev/null @@ -1,19 +0,0 @@ -aiassist.site - -### Example: `aiassist` - -```python -import aiassist - -question1 = "Who won the world series in 2020?" -req = aiassist.Completion.create(prompt=question1) -answer = req["text"] -message_id = req["parentMessageId"] - -question2 = "Where was it played?" 
-req2 = aiassist.Completion.create(prompt=question2, parentMessageId=message_id) -answer2 = req2["text"] - -print(answer) -print(answer2) -``` diff --git a/spaces/justest/gpt4free/g4f/Provider/Providers/AItianhu.py b/spaces/justest/gpt4free/g4f/Provider/Providers/AItianhu.py deleted file mode 100644 index d3e6a45f5a60800cc3648605f80cbc4da2d7e9d7..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/Provider/Providers/AItianhu.py +++ /dev/null @@ -1,36 +0,0 @@ -import os, requests -from ...typing import sha256, Dict, get_type_hints -import json - -url = "https://www.aitianhu.com/api/chat-process" -model = ['gpt-3.5-turbo'] -supports_stream = False -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - base = '' - for message in messages: - base += '%s: %s\n' % (message['role'], message['content']) - base += 'assistant:' - - headers = { - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36" - } - data = { - "prompt": base, - "options": {}, - "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.", - "temperature": 0.8, - "top_p": 1 - } - response = requests.post(url, headers=headers, json=data) - if response.status_code == 200: - lines = response.text.strip().split('\n') - res = json.loads(lines[-1]) - yield res['text'] - else: - print(f"Error Occurred::{response.status_code}") - return None - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/juuxn/SimpleRVC/config.py b/spaces/juuxn/SimpleRVC/config.py deleted file mode 100644 index 270d25272b5bb2bb45725954d17f5d13957ffdab..0000000000000000000000000000000000000000 --- a/spaces/juuxn/SimpleRVC/config.py +++ /dev/null @@ -1,127 +0,0 @@ -import argparse -import sys -import torch -import json -from multiprocessing import cpu_count - -global usefp16 -usefp16 = False - - -def use_fp32_config(): - usefp16 = False - device_capability = 0 - if torch.cuda.is_available(): - device = torch.device("cuda:0") # Assuming you have only one GPU (index 0). - device_capability = torch.cuda.get_device_capability(device)[0] - if device_capability >= 7: - usefp16 = True - for config_file in ["32k.json", "40k.json", "48k.json"]: - with open(f"configs/{config_file}", "r") as d: - data = json.load(d) - - if "train" in data and "fp16_run" in data["train"]: - data["train"]["fp16_run"] = True - - with open(f"configs/{config_file}", "w") as d: - json.dump(data, d, indent=4) - - print(f"Set fp16_run to true in {config_file}") - - else: - for config_file in ["32k.json", "40k.json", "48k.json"]: - with open(f"configs/{config_file}", "r") as f: - data = json.load(f) - - if "train" in data and "fp16_run" in data["train"]: - data["train"]["fp16_run"] = False - - with open(f"configs/{config_file}", "w") as d: - json.dump(data, d, indent=4) - - print(f"Set fp16_run to false in {config_file}") - else: - print( - "CUDA is not available. Make sure you have an NVIDIA GPU and CUDA installed." 
- ) - return (usefp16, device_capability) - - -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. - # check `getattr` and try it for compatibility - @staticmethod - def has_mps() -> bool: - if not torch.backends.mps.is_available(): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("Found GPU", self.gpu_name, ", force to fp32") - self.is_half = False - else: - print("Found GPU", self.gpu_name) - use_fp32_config() - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - elif self.has_mps(): - print("No supported Nvidia GPU found, use MPS instead") - self.device = "mps" - self.is_half = False - use_fp32_config() - else: - print("No supported Nvidia GPU found, use CPU instead") - self.device = "cpu" - self.is_half = False - use_fp32_config() - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max diff --git a/spaces/juuxn/SimpleRVC/constants.py b/spaces/juuxn/SimpleRVC/constants.py deleted file mode 100644 index 2384c60fe19b8578b7aa6bca0d30e81dd46cc265..0000000000000000000000000000000000000000 --- a/spaces/juuxn/SimpleRVC/constants.py +++ /dev/null @@ -1,441 +0,0 @@ -zips_folder = "./zips" -unzips_folder = "./unzips" - -VOICE_METHODS = ["Edge-tts", "CoquiTTS", "ElevenLabs",] - -BARK_VOICES = [ - "v2/en_speaker_0-Male", - "v2/en_speaker_1-Male", - "v2/en_speaker_2-Male", - "v2/en_speaker_3-Male", - "v2/en_speaker_4-Male", - "v2/en_speaker_5-Male", - "v2/en_speaker_6-Male", - "v2/en_speaker_7-Male", - "v2/en_speaker_8-Male", - "v2/en_speaker_9-Female", - "v2/zh_speaker_0-Male", - "v2/zh_speaker_1-Male", - "v2/zh_speaker_2-Male", - "v2/zh_speaker_3-Male", - "v2/zh_speaker_4-Female", - "v2/zh_speaker_5-Male", - "v2/zh_speaker_6-Female", - "v2/zh_speaker_7-Female", - "v2/zh_speaker_8-Male", - "v2/zh_speaker_9-Female", - "v2/fr_speaker_0-Male", - "v2/fr_speaker_1-Female", - "v2/fr_speaker_2-Female", - "v2/fr_speaker_3-Male", - "v2/fr_speaker_4-Male", - "v2/fr_speaker_5-Female", - "v2/fr_speaker_6-Male", - "v2/fr_speaker_7-Male", - "v2/fr_speaker_8-Male", - "v2/fr_speaker_9-Male", - "v2/de_speaker_0-Male", - "v2/de_speaker_1-Male", - "v2/de_speaker_2-Male", - "v2/de_speaker_3-Female", - "v2/de_speaker_4-Male", - "v2/de_speaker_5-Male", - "v2/de_speaker_6-Male", - "v2/de_speaker_7-Male", - "v2/de_speaker_8-Female", - "v2/de_speaker_9-Male", - "v2/hi_speaker_0-Female", - "v2/hi_speaker_1-Female", - "v2/hi_speaker_2-Male", - "v2/hi_speaker_3-Female", - "v2/hi_speaker_4-Female", - "v2/hi_speaker_5-Male", - "v2/hi_speaker_6-Male", - "v2/hi_speaker_7-Male", - 
"v2/hi_speaker_8-Male", - "v2/hi_speaker_9-Female", - "v2/it_speaker_0-Male", - "v2/it_speaker_1-Male", - "v2/it_speaker_2-Female", - "v2/it_speaker_3-Male", - "v2/it_speaker_4-Male", - "v2/it_speaker_5-Male", - "v2/it_speaker_6-Male", - "v2/it_speaker_7-Female", - "v2/it_speaker_8-Male", - "v2/it_speaker_9-Female", - "v2/ja_speaker_0-Female", - "v2/ja_speaker_1-Female", - "v2/ja_speaker_2-Male", - "v2/ja_speaker_3-Female", - "v2/ja_speaker_4-Female", - "v2/ja_speaker_5-Female", - "v2/ja_speaker_6-Male", - "v2/ja_speaker_7-Female", - "v2/ja_speaker_8-Female", - "v2/ja_speaker_9-Female", - "v2/ko_speaker_0-Female", - "v2/ko_speaker_1-Male", - "v2/ko_speaker_2-Male", - "v2/ko_speaker_3-Male", - "v2/ko_speaker_4-Male", - "v2/ko_speaker_5-Male", - "v2/ko_speaker_6-Male", - "v2/ko_speaker_7-Male", - "v2/ko_speaker_8-Male", - "v2/ko_speaker_9-Male", - "v2/pl_speaker_0-Male", - "v2/pl_speaker_1-Male", - "v2/pl_speaker_2-Male", - "v2/pl_speaker_3-Male", - "v2/pl_speaker_4-Female", - "v2/pl_speaker_5-Male", - "v2/pl_speaker_6-Female", - "v2/pl_speaker_7-Male", - "v2/pl_speaker_8-Male", - "v2/pl_speaker_9-Female", - "v2/pt_speaker_0-Male", - "v2/pt_speaker_1-Male", - "v2/pt_speaker_2-Male", - "v2/pt_speaker_3-Male", - "v2/pt_speaker_4-Male", - "v2/pt_speaker_5-Male", - "v2/pt_speaker_6-Male", - "v2/pt_speaker_7-Male", - "v2/pt_speaker_8-Male", - "v2/pt_speaker_9-Male", - "v2/ru_speaker_0-Male", - "v2/ru_speaker_1-Male", - "v2/ru_speaker_2-Male", - "v2/ru_speaker_3-Male", - "v2/ru_speaker_4-Male", - "v2/ru_speaker_5-Female", - "v2/ru_speaker_6-Female", - "v2/ru_speaker_7-Male", - "v2/ru_speaker_8-Male", - "v2/ru_speaker_9-Female", - "v2/es_speaker_0-Male", - "v2/es_speaker_1-Male", - "v2/es_speaker_2-Male", - "v2/es_speaker_3-Male", - "v2/es_speaker_4-Male", - "v2/es_speaker_5-Male", - "v2/es_speaker_6-Male", - "v2/es_speaker_7-Male", - "v2/es_speaker_8-Female", - "v2/es_speaker_9-Female", - "v2/tr_speaker_0-Male", - "v2/tr_speaker_1-Male", - "v2/tr_speaker_2-Male", - "v2/tr_speaker_3-Male", - "v2/tr_speaker_4-Female", - "v2/tr_speaker_5-Female", - "v2/tr_speaker_6-Male", - "v2/tr_speaker_7-Male", - "v2/tr_speaker_8-Male", - "v2/tr_speaker_9-Male", -] - -EDGE_VOICES = [ - "af-ZA-AdriNeural-Female", - "af-ZA-WillemNeural-Male", - "sq-AL-AnilaNeural-Female", - "sq-AL-IlirNeural-Male", - "am-ET-AmehaNeural-Male", - "am-ET-MekdesNeural-Female", - "ar-DZ-AminaNeural-Female", - "ar-DZ-IsmaelNeural-Male", - "ar-BH-AliNeural-Male", - "ar-BH-LailaNeural-Female", - "ar-EG-SalmaNeural-Female", - "ar-EG-ShakirNeural-Male", - "ar-IQ-BasselNeural-Male", - "ar-IQ-RanaNeural-Female", - "ar-JO-SanaNeural-Female", - "ar-JO-TaimNeural-Male", - "ar-KW-FahedNeural-Male", - "ar-KW-NouraNeural-Female", - "ar-LB-LaylaNeural-Female", - "ar-LB-RamiNeural-Male", - "ar-LY-ImanNeural-Female", - "ar-LY-OmarNeural-Male", - "ar-MA-JamalNeural-Male", - "ar-MA-MounaNeural-Female", - "ar-OM-AbdullahNeural-Male", - "ar-OM-AyshaNeural-Female", - "ar-QA-AmalNeural-Female", - "ar-QA-MoazNeural-Male", - "ar-SA-HamedNeural-Male", - "ar-SA-ZariyahNeural-Female", - "ar-SY-AmanyNeural-Female", - "ar-SY-LaithNeural-Male", - "ar-TN-HediNeural-Male", - "ar-TN-ReemNeural-Female", - "ar-AE-FatimaNeural-Female", - "ar-AE-HamdanNeural-Male", - "ar-YE-MaryamNeural-Female", - "ar-YE-SalehNeural-Male", - "az-AZ-BabekNeural-Male", - "az-AZ-BanuNeural-Female", - "bn-BD-NabanitaNeural-Female", - "bn-BD-PradeepNeural-Male", - "bn-IN-BashkarNeural-Male", - "bn-IN-TanishaaNeural-Female", - "bs-BA-GoranNeural-Male", - "bs-BA-VesnaNeural-Female", - 
"bg-BG-BorislavNeural-Male", - "bg-BG-KalinaNeural-Female", - "my-MM-NilarNeural-Female", - "my-MM-ThihaNeural-Male", - "ca-ES-EnricNeural-Male", - "ca-ES-JoanaNeural-Female", - "zh-HK-HiuGaaiNeural-Female", - "zh-HK-HiuMaanNeural-Female", - "zh-HK-WanLungNeural-Male", - "zh-CN-XiaoxiaoNeural-Female", - "zh-CN-XiaoyiNeural-Female", - "zh-CN-YunjianNeural-Male", - "zh-CN-YunxiNeural-Male", - "zh-CN-YunxiaNeural-Male", - "zh-CN-YunyangNeural-Male", - "zh-CN-liaoning-XiaobeiNeural-Female", - "zh-TW-HsiaoChenNeural-Female", - "zh-TW-YunJheNeural-Male", - "zh-TW-HsiaoYuNeural-Female", - "zh-CN-shaanxi-XiaoniNeural-Female", - "hr-HR-GabrijelaNeural-Female", - "hr-HR-SreckoNeural-Male", - "cs-CZ-AntoninNeural-Male", - "cs-CZ-VlastaNeural-Female", - "da-DK-ChristelNeural-Female", - "da-DK-JeppeNeural-Male", - "nl-BE-ArnaudNeural-Male", - "nl-BE-DenaNeural-Female", - "nl-NL-ColetteNeural-Female", - "nl-NL-FennaNeural-Female", - "nl-NL-MaartenNeural-Male", - "en-AU-NatashaNeural-Female", - "en-AU-WilliamNeural-Male", - "en-CA-ClaraNeural-Female", - "en-CA-LiamNeural-Male", - "en-HK-SamNeural-Male", - "en-HK-YanNeural-Female", - "en-IN-NeerjaExpressiveNeural-Female", - "en-IN-NeerjaNeural-Female", - "en-IN-PrabhatNeural-Male", - "en-IE-ConnorNeural-Male", - "en-IE-EmilyNeural-Female", - "en-KE-AsiliaNeural-Female", - "en-KE-ChilembaNeural-Male", - "en-NZ-MitchellNeural-Male", - "en-NZ-MollyNeural-Female", - "en-NG-AbeoNeural-Male", - "en-NG-EzinneNeural-Female", - "en-PH-JamesNeural-Male", - "en-PH-RosaNeural-Female", - "en-SG-LunaNeural-Female", - "en-SG-WayneNeural-Male", - "en-ZA-LeahNeural-Female", - "en-ZA-LukeNeural-Male", - "en-TZ-ElimuNeural-Male", - "en-TZ-ImaniNeural-Female", - "en-GB-LibbyNeural-Female", - "en-GB-MaisieNeural-Female", - "en-GB-RyanNeural-Male", - "en-GB-SoniaNeural-Female", - "en-GB-ThomasNeural-Male", - "en-US-AriaNeural-Female", - "en-US-AnaNeural-Female", - "en-US-ChristopherNeural-Male", - "en-US-EricNeural-Male", - "en-US-GuyNeural-Male", - "en-US-JennyNeural-Female", - "en-US-MichelleNeural-Female", - "en-US-RogerNeural-Male", - "en-US-SteffanNeural-Male", - "et-EE-AnuNeural-Female", - "et-EE-KertNeural-Male", - "fil-PH-AngeloNeural-Male", - "fil-PH-BlessicaNeural-Female", - "fi-FI-HarriNeural-Male", - "fi-FI-NooraNeural-Female", - "fr-BE-CharlineNeural-Female", - "fr-BE-GerardNeural-Male", - "fr-CA-AntoineNeural-Male", - "fr-CA-JeanNeural-Male", - "fr-CA-SylvieNeural-Female", - "fr-FR-DeniseNeural-Female", - "fr-FR-EloiseNeural-Female", - "fr-FR-HenriNeural-Male", - "fr-CH-ArianeNeural-Female", - "fr-CH-FabriceNeural-Male", - "gl-ES-RoiNeural-Male", - "gl-ES-SabelaNeural-Female", - "ka-GE-EkaNeural-Female", - "ka-GE-GiorgiNeural-Male", - "de-AT-IngridNeural-Female", - "de-AT-JonasNeural-Male", - "de-DE-AmalaNeural-Female", - "de-DE-ConradNeural-Male", - "de-DE-KatjaNeural-Female", - "de-DE-KillianNeural-Male", - "de-CH-JanNeural-Male", - "de-CH-LeniNeural-Female", - "el-GR-AthinaNeural-Female", - "el-GR-NestorasNeural-Male", - "gu-IN-DhwaniNeural-Female", - "gu-IN-NiranjanNeural-Male", - "he-IL-AvriNeural-Male", - "he-IL-HilaNeural-Female", - "hi-IN-MadhurNeural-Male", - "hi-IN-SwaraNeural-Female", - "hu-HU-NoemiNeural-Female", - "hu-HU-TamasNeural-Male", - "is-IS-GudrunNeural-Female", - "is-IS-GunnarNeural-Male", - "id-ID-ArdiNeural-Male", - "id-ID-GadisNeural-Female", - "ga-IE-ColmNeural-Male", - "ga-IE-OrlaNeural-Female", - "it-IT-DiegoNeural-Male", - "it-IT-ElsaNeural-Female", - "it-IT-IsabellaNeural-Female", - "ja-JP-KeitaNeural-Male", - 
"ja-JP-NanamiNeural-Female", - "jv-ID-DimasNeural-Male", - "jv-ID-SitiNeural-Female", - "kn-IN-GaganNeural-Male", - "kn-IN-SapnaNeural-Female", - "kk-KZ-AigulNeural-Female", - "kk-KZ-DauletNeural-Male", - "km-KH-PisethNeural-Male", - "km-KH-SreymomNeural-Female", - "ko-KR-InJoonNeural-Male", - "ko-KR-SunHiNeural-Female", - "lo-LA-ChanthavongNeural-Male", - "lo-LA-KeomanyNeural-Female", - "lv-LV-EveritaNeural-Female", - "lv-LV-NilsNeural-Male", - "lt-LT-LeonasNeural-Male", - "lt-LT-OnaNeural-Female", - "mk-MK-AleksandarNeural-Male", - "mk-MK-MarijaNeural-Female", - "ms-MY-OsmanNeural-Male", - "ms-MY-YasminNeural-Female", - "ml-IN-MidhunNeural-Male", - "ml-IN-SobhanaNeural-Female", - "mt-MT-GraceNeural-Female", - "mt-MT-JosephNeural-Male", - "mr-IN-AarohiNeural-Female", - "mr-IN-ManoharNeural-Male", - "mn-MN-BataaNeural-Male", - "mn-MN-YesuiNeural-Female", - "ne-NP-HemkalaNeural-Female", - "ne-NP-SagarNeural-Male", - "nb-NO-FinnNeural-Male", - "nb-NO-PernilleNeural-Female", - "ps-AF-GulNawazNeural-Male", - "ps-AF-LatifaNeural-Female", - "fa-IR-DilaraNeural-Female", - "fa-IR-FaridNeural-Male", - "pl-PL-MarekNeural-Male", - "pl-PL-ZofiaNeural-Female", - "pt-BR-AntonioNeural-Male", - "pt-BR-FranciscaNeural-Female", - "pt-PT-DuarteNeural-Male", - "pt-PT-RaquelNeural-Female", - "ro-RO-AlinaNeural-Female", - "ro-RO-EmilNeural-Male", - "ru-RU-DmitryNeural-Male", - "ru-RU-SvetlanaNeural-Female", - "sr-RS-NicholasNeural-Male", - "sr-RS-SophieNeural-Female", - "si-LK-SameeraNeural-Male", - "si-LK-ThiliniNeural-Female", - "sk-SK-LukasNeural-Male", - "sk-SK-ViktoriaNeural-Female", - "sl-SI-PetraNeural-Female", - "sl-SI-RokNeural-Male", - "so-SO-MuuseNeural-Male", - "so-SO-UbaxNeural-Female", - "es-AR-ElenaNeural-Female", - "es-AR-TomasNeural-Male", - "es-BO-MarceloNeural-Male", - "es-BO-SofiaNeural-Female", - "es-CL-CatalinaNeural-Female", - "es-CL-LorenzoNeural-Male", - "es-CO-GonzaloNeural-Male", - "es-CO-SalomeNeural-Female", - "es-CR-JuanNeural-Male", - "es-CR-MariaNeural-Female", - "es-CU-BelkysNeural-Female", - "es-CU-ManuelNeural-Male", - "es-DO-EmilioNeural-Male", - "es-DO-RamonaNeural-Female", - "es-EC-AndreaNeural-Female", - "es-EC-LuisNeural-Male", - "es-SV-LorenaNeural-Female", - "es-SV-RodrigoNeural-Male", - "es-GQ-JavierNeural-Male", - "es-GQ-TeresaNeural-Female", - "es-GT-AndresNeural-Male", - "es-GT-MartaNeural-Female", - "es-HN-CarlosNeural-Male", - "es-HN-KarlaNeural-Female", - "es-MX-DaliaNeural-Female", - "es-MX-JorgeNeural-Male", - "es-NI-FedericoNeural-Male", - "es-NI-YolandaNeural-Female", - "es-PA-MargaritaNeural-Female", - "es-PA-RobertoNeural-Male", - "es-PY-MarioNeural-Male", - "es-PY-TaniaNeural-Female", - "es-PE-AlexNeural-Male", - "es-PE-CamilaNeural-Female", - "es-PR-KarinaNeural-Female", - "es-PR-VictorNeural-Male", - "es-ES-AlvaroNeural-Male", - "es-ES-ElviraNeural-Female", - "es-US-AlonsoNeural-Male", - "es-US-PalomaNeural-Female", - "es-UY-MateoNeural-Male", - "es-UY-ValentinaNeural-Female", - "es-VE-PaolaNeural-Female", - "es-VE-SebastianNeural-Male", - "su-ID-JajangNeural-Male", - "su-ID-TutiNeural-Female", - "sw-KE-RafikiNeural-Male", - "sw-KE-ZuriNeural-Female", - "sw-TZ-DaudiNeural-Male", - "sw-TZ-RehemaNeural-Female", - "sv-SE-MattiasNeural-Male", - "sv-SE-SofieNeural-Female", - "ta-IN-PallaviNeural-Female", - "ta-IN-ValluvarNeural-Male", - "ta-MY-KaniNeural-Female", - "ta-MY-SuryaNeural-Male", - "ta-SG-AnbuNeural-Male", - "ta-SG-VenbaNeural-Female", - "ta-LK-KumarNeural-Male", - "ta-LK-SaranyaNeural-Female", - "te-IN-MohanNeural-Male", - 
"te-IN-ShrutiNeural-Female", - "th-TH-NiwatNeural-Male", - "th-TH-PremwadeeNeural-Female", - "tr-TR-AhmetNeural-Male", - "tr-TR-EmelNeural-Female", - "uk-UA-OstapNeural-Male", - "uk-UA-PolinaNeural-Female", - "ur-IN-GulNeural-Female", - "ur-IN-SalmanNeural-Male", - "ur-PK-AsadNeural-Male", - "ur-PK-UzmaNeural-Female", - "uz-UZ-MadinaNeural-Female", - "uz-UZ-SardorNeural-Male", - "vi-VN-HoaiMyNeural-Female", - "vi-VN-NamMinhNeural-Male", - "cy-GB-AledNeural-Male", - "cy-GB-NiaNeural-Female", - "zu-ZA-ThandoNeural-Female", - "zu-ZA-ThembaNeural-Male", -] diff --git a/spaces/katanaml-org/sparrow-ui/tools/agstyler.py b/spaces/katanaml-org/sparrow-ui/tools/agstyler.py deleted file mode 100644 index 35f674fa6c68450b3d91375d18ca7b5705ab7274..0000000000000000000000000000000000000000 --- a/spaces/katanaml-org/sparrow-ui/tools/agstyler.py +++ /dev/null @@ -1,77 +0,0 @@ -# adjusted from: https://github.com/nryabykh/streamlit-aggrid-hints - -from st_aggrid import AgGrid -from st_aggrid.grid_options_builder import GridOptionsBuilder -from st_aggrid.shared import GridUpdateMode, JsCode - - -def get_numeric_style_with_precision(precision: int) -> dict: - return {"type": ["numericColumn", "customNumericFormat"], "precision": precision} - - -PRECISION_ZERO = get_numeric_style_with_precision(0) -PRECISION_ONE = get_numeric_style_with_precision(1) -PRECISION_TWO = get_numeric_style_with_precision(2) -PINLEFT = {"pinned": "left"} - - -def draw_grid( - df, - formatter: dict = None, - selection="multiple", - use_checkbox=False, - fit_columns=False, - pagination_size=0, - theme="streamlit", - wrap_text: bool = False, - auto_height: bool = False, - grid_options: dict = None, - key=None, - css: dict = None -): - - gb = GridOptionsBuilder() - gb.configure_default_column( - filterable=True, - groupable=False, - editable=False, - wrapText=wrap_text, - autoHeight=auto_height - ) - - if grid_options is not None: - gb.configure_grid_options(**grid_options) - - for latin_name, (cyr_name, style_dict) in formatter.items(): - gb.configure_column(latin_name, header_name=cyr_name, **style_dict) - - gb.configure_selection(selection_mode=selection, use_checkbox=use_checkbox) - - if pagination_size > 0: - gb.configure_pagination(enabled=True, paginationAutoPageSize=False, paginationPageSize=pagination_size) - - return AgGrid( - df, - gridOptions=gb.build(), - update_mode=GridUpdateMode.SELECTION_CHANGED | GridUpdateMode.VALUE_CHANGED, - allow_unsafe_jscode=True, - fit_columns_on_grid_load=fit_columns, - theme=theme, - key=key, - custom_css=css, - enable_enterprise_modules=False - ) - - -def highlight(color, condition): - code = f""" - function(params) {{ - color = "{color}"; - if ({condition}) {{ - return {{ - 'backgroundColor': color - }} - }} - }}; - """ - return JsCode(code) diff --git a/spaces/kdrkdrkdr/ProsekaTTS/models.py b/spaces/kdrkdrkdr/ProsekaTTS/models.py deleted file mode 100644 index fe004e94bbe9074ec736f14325268f4515a53420..0000000000000000000000000000000000000000 --- a/spaces/kdrkdrkdr/ProsekaTTS/models.py +++ /dev/null @@ -1,540 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, 
gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = 
modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - if self.n_vocab != 0: - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - if self.n_vocab != 0: - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = 
torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - 
self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 1: - g = 
self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 1: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 1, "n_speakers have to be larger than 1." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) diff --git a/spaces/keras-io/randaugment/app.py b/spaces/keras-io/randaugment/app.py deleted file mode 100644 index 9d2b291d472e3240f162aa0afcf38d7521e660ad..0000000000000000000000000000000000000000 --- a/spaces/keras-io/randaugment/app.py +++ /dev/null @@ -1,38 +0,0 @@ -import numpy as np -import tensorflow as tf -from huggingface_hub import from_pretrained_keras -import gradio as gr - -IMAGE_SIZE = 72 -# labels taken from https://huggingface.co/datasets/cifar10 -labels = {0: "airplane", - 1: "automobile", - 2: "bird", - 3: "cat", - 4: "deer", - 5: "dog", - 6: "frog", - 7: "horse", - 8: "ship", - 9: "truck"} - - -model = from_pretrained_keras("keras-io/randaugment") - - - -def predict_img_label(img): - inp = tf.image.resize(img, (IMAGE_SIZE, IMAGE_SIZE)) - pred = model.predict(tf.expand_dims(inp, 0)).flatten() - return {labels[i]: float(pred[i]) for i in range(len(labels))} - -image = gr.inputs.Image() -label = gr.outputs.Label(num_top_classes=3) - -title = "Image Classification Model Using RandAugment" -description = "Upload an image to classify images" - -article = "" -gr.Interface(predict_img_label, inputs=image, outputs=label, allow_flagging=False, - examples = [['./airplane.jpg'], ['./car.png'], ['./cat.jpg'], ['./horse.jpg']], - title=title, description=description, article=article).launch(enable_queue=True) diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/glint360k_r34.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/glint360k_r34.py deleted file mode 100644 index fda2701758a839a7161d09c25f0ca3d26033baff..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/src/face3d/models/arcface_torch/configs/glint360k_r34.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "cosface" -config.network = "r34" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/glint360k" -config.num_classes = 360232 -config.num_image = 17091657 -config.num_epoch = 20 -config.warmup_epoch = -1 -config.decay_epoch = [8, 12, 15, 18] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/utils/__init__.py b/spaces/kevinwang676/SadTalker/src/face3d/models/arcface_torch/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/default_runtime.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/default_runtime.py deleted file mode 100644 index b564cc4e7e7d9a67dacaaddecb100e4d8f5c005b..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/default_runtime.py +++ /dev/null @@ -1,14 +0,0 @@ -# yapf:disable -log_config = dict( - interval=50, - 
hooks=[ - dict(type='TextLoggerHook', by_epoch=False), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -cudnn_benchmark = True diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/saconv.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/saconv.py deleted file mode 100644 index b4ee3978e097fca422805db4e31ae481006d7971..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmcv/ops/saconv.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init -from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version - - -@CONV_LAYERS.register_module(name='SAC') -class SAConv2d(ConvAWS2d): - """SAC (Switchable Atrous Convolution) - - This is an implementation of SAC in DetectoRS - (https://arxiv.org/pdf/2006.02334.pdf). - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 0 - padding_mode (string, optional): ``'zeros'``, ``'reflect'``, - ``'replicate'`` or ``'circular'``. Default: ``'zeros'`` - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 1 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If ``True``, adds a learnable bias to the - output. Default: ``True`` - use_deform: If ``True``, replace convolution with deformable - convolution. Default: ``False``. 
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - use_deform=False): - super().__init__( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - self.use_deform = use_deform - self.switch = nn.Conv2d( - self.in_channels, 1, kernel_size=1, stride=stride, bias=True) - self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size())) - self.pre_context = nn.Conv2d( - self.in_channels, self.in_channels, kernel_size=1, bias=True) - self.post_context = nn.Conv2d( - self.out_channels, self.out_channels, kernel_size=1, bias=True) - if self.use_deform: - self.offset_s = nn.Conv2d( - self.in_channels, - 18, - kernel_size=3, - padding=1, - stride=stride, - bias=True) - self.offset_l = nn.Conv2d( - self.in_channels, - 18, - kernel_size=3, - padding=1, - stride=stride, - bias=True) - self.init_weights() - - def init_weights(self): - constant_init(self.switch, 0, bias=1) - self.weight_diff.data.zero_() - constant_init(self.pre_context, 0) - constant_init(self.post_context, 0) - if self.use_deform: - constant_init(self.offset_s, 0) - constant_init(self.offset_l, 0) - - def forward(self, x): - # pre-context - avg_x = F.adaptive_avg_pool2d(x, output_size=1) - avg_x = self.pre_context(avg_x) - avg_x = avg_x.expand_as(x) - x = x + avg_x - # switch - avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect') - avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0) - switch = self.switch(avg_x) - # sac - weight = self._get_weight(self.weight) - zero_bias = torch.zeros( - self.out_channels, device=weight.device, dtype=weight.dtype) - - if self.use_deform: - offset = self.offset_s(avg_x) - out_s = deform_conv2d(x, offset, weight, self.stride, self.padding, - self.dilation, self.groups, 1) - else: - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.5.0')): - out_s = super().conv2d_forward(x, weight) - elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - # bias is a required argument of _conv_forward in torch 1.8.0 - out_s = super()._conv_forward(x, weight, zero_bias) - else: - out_s = super()._conv_forward(x, weight) - ori_p = self.padding - ori_d = self.dilation - self.padding = tuple(3 * p for p in self.padding) - self.dilation = tuple(3 * d for d in self.dilation) - weight = weight + self.weight_diff - if self.use_deform: - offset = self.offset_l(avg_x) - out_l = deform_conv2d(x, offset, weight, self.stride, self.padding, - self.dilation, self.groups, 1) - else: - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.5.0')): - out_l = super().conv2d_forward(x, weight) - elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - # bias is a required argument of _conv_forward in torch 1.8.0 - out_l = super()._conv_forward(x, weight, zero_bias) - else: - out_l = super()._conv_forward(x, weight) - - out = switch * out_s + (1 - switch) * out_l - self.padding = ori_p - self.dilation = ori_d - # post-context - avg_x = F.adaptive_avg_pool2d(out, output_size=1) - avg_x = self.post_context(avg_x) - avg_x = avg_x.expand_as(out) - out = out + avg_x - return out diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/m2m_100/process_data/clean_histogram.py b/spaces/koajoel/PolyFormer/fairseq/examples/m2m_100/process_data/clean_histogram.py deleted file mode 100644 index e24e073dc0eb43c76e2ce717f52bb848c5b026b8..0000000000000000000000000000000000000000 --- 
a/spaces/koajoel/PolyFormer/fairseq/examples/m2m_100/process_data/clean_histogram.py +++ /dev/null @@ -1,52 +0,0 @@ -import argparse - -parser = argparse.ArgumentParser() -parser.add_argument('--src', type=str, help='Source language') -parser.add_argument('--tgt', type=str, help='Target language') -parser.add_argument('--src-file', type=str, help='Input source file') -parser.add_argument('--tgt-file', type=str, help='Input target file') -parser.add_argument('--src-output-file', type=str, help='Output source file') -parser.add_argument('--tgt-output-file', type=str, help='Output target file') -parser.add_argument('--threshold', type=float, default=0.5, help='Threshold') -parser.add_argument('--threshold-character', type=str, default=']', help='Threshold character') -parser.add_argument('--histograms', type=str, help='Path to histograms') - -args = parser.parse_args() - - -def read_hist(f): - ch = [] - for line in f: - c = line[0] - if c == args.threshold_character: - break - ch.append(c) - return ch - - -with(open("{}/{}".format(args.histograms, args.src), 'r', encoding='utf8')) as f: - ch1 = read_hist(f) - -with(open("{}/{}".format(args.histograms, args.tgt), 'r', encoding='utf8')) as f: - ch2 = read_hist(f) - -print("Accepted characters for {}: {}".format(args.src, ch1)) -print("Accepted characters for {}: {}".format(args.tgt, ch2)) - -with open(args.src_file, 'r', encoding='utf8') as fs1, open(args.tgt_file, 'r', encoding='utf8') as fs2, open(args.src_output_file, 'w', encoding='utf8') as fos1, open(args.tgt_output_file, 'w', encoding='utf8') as fos2: - ls1 = fs1.readline() - ls2 = fs2.readline() - - while ls1 or ls2: - cnt1 = len([c for c in ls1.strip() if c in ch1]) - cnt2 = len([c for c in ls2.strip() if c in ch2]) - - if cnt1 / len(ls1) > args.threshold and cnt2 / len(ls2) > args.threshold: - fos1.write(ls1) - fos2.write(ls2) - else: - print("{} {} {} \n{} {} {}".format(args.src, cnt1 / len(ls1), ls1.strip(), args.tgt, cnt2 / len(ls2), ls2.strip())) - - ls1 = fs1.readline() - ls2 = fs2.readline() - \ No newline at end of file diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/new/__init__.py b/spaces/koajoel/PolyFormer/fairseq/examples/speech_recognition/new/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-6d2143a6.js b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-6d2143a6.js deleted file mode 100644 index e473ab23813cf35e4b2d0924923c4a42fde5151b..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-6d2143a6.js +++ /dev/null @@ -1,7 +0,0 @@ -import{c as F,e as I,s as ce,N as me,t as c,P as _e,g as Ue,T as E,p as Qe,h as J,E as v,b as se,j as Ze,k as Ge,l as Ve,m as Ke,f as Je,i as Ye,n as We,o as et,q as ne,r as tt}from"./index-aa084753.js";import{html as rt}from"./index-cfae41de.js";import"./index-8c3da1d9.js";import"./Blocks-6ad6f005.js";import"./Button-62634b34.js";import"./BlockLabel-98ef75ee.js";import"./Empty-5d52e655.js";/* empty css */import"./Copy-fd383441.js";import"./Download-dfb06e25.js";import"./index-767254b1.js";import"./index-26c6ce60.js";import"./index-0f59eac9.js";class 
X{constructor(e,r,s,n,i,o,a){this.type=e,this.value=r,this.from=s,this.hash=n,this.end=i,this.children=o,this.positions=a,this.hashProp=[[I.contextHash,n]]}static create(e,r,s,n,i){let o=n+(n<<8)+e+(r<<4)|0;return new X(e,r,s,o,i,[],[])}addChild(e,r){e.prop(I.contextHash)!=this.hash&&(e=new E(e.type,e.children,e.positions,e.length,this.hashProp)),this.children.push(e),this.positions.push(r)}toTree(e,r=this.end){let s=this.children.length-1;return s>=0&&(r=Math.max(r,this.positions[s]+this.children[s].length+this.from)),new E(e.types[this.type],this.children,this.positions,r-this.from).balance({makeTree:(i,o,a)=>new E(F.none,i,o,a,this.hashProp)})}}var f;(function(t){t[t.Document=1]="Document",t[t.CodeBlock=2]="CodeBlock",t[t.FencedCode=3]="FencedCode",t[t.Blockquote=4]="Blockquote",t[t.HorizontalRule=5]="HorizontalRule",t[t.BulletList=6]="BulletList",t[t.OrderedList=7]="OrderedList",t[t.ListItem=8]="ListItem",t[t.ATXHeading1=9]="ATXHeading1",t[t.ATXHeading2=10]="ATXHeading2",t[t.ATXHeading3=11]="ATXHeading3",t[t.ATXHeading4=12]="ATXHeading4",t[t.ATXHeading5=13]="ATXHeading5",t[t.ATXHeading6=14]="ATXHeading6",t[t.SetextHeading1=15]="SetextHeading1",t[t.SetextHeading2=16]="SetextHeading2",t[t.HTMLBlock=17]="HTMLBlock",t[t.LinkReference=18]="LinkReference",t[t.Paragraph=19]="Paragraph",t[t.CommentBlock=20]="CommentBlock",t[t.ProcessingInstructionBlock=21]="ProcessingInstructionBlock",t[t.Escape=22]="Escape",t[t.Entity=23]="Entity",t[t.HardBreak=24]="HardBreak",t[t.Emphasis=25]="Emphasis",t[t.StrongEmphasis=26]="StrongEmphasis",t[t.Link=27]="Link",t[t.Image=28]="Image",t[t.InlineCode=29]="InlineCode",t[t.HTMLTag=30]="HTMLTag",t[t.Comment=31]="Comment",t[t.ProcessingInstruction=32]="ProcessingInstruction",t[t.URL=33]="URL",t[t.HeaderMark=34]="HeaderMark",t[t.QuoteMark=35]="QuoteMark",t[t.ListMark=36]="ListMark",t[t.LinkMark=37]="LinkMark",t[t.EmphasisMark=38]="EmphasisMark",t[t.CodeMark=39]="CodeMark",t[t.CodeText=40]="CodeText",t[t.CodeInfo=41]="CodeInfo",t[t.LinkTitle=42]="LinkTitle",t[t.LinkLabel=43]="LinkLabel"})(f||(f={}));class st{constructor(e,r){this.start=e,this.content=r,this.marks=[],this.parsers=[]}}class nt{constructor(){this.text="",this.baseIndent=0,this.basePos=0,this.depth=0,this.markers=[],this.pos=0,this.indent=0,this.next=-1}forward(){this.basePos>this.pos&&this.forwardInner()}forwardInner(){let e=this.skipSpace(this.basePos);this.indent=this.countIndent(e,this.pos,this.indent),this.pos=e,this.next=e==this.text.length?-1:this.text.charCodeAt(e)}skipSpace(e){return N(this.text,e)}reset(e){for(this.text=e,this.baseIndent=this.basePos=this.pos=this.indent=0,this.forwardInner(),this.depth=1;this.markers.length;)this.markers.pop()}moveBase(e){this.basePos=e,this.baseIndent=this.countIndent(e,this.pos,this.indent)}moveBaseColumn(e){this.baseIndent=e,this.basePos=this.findColumn(e)}addMarker(e){this.markers.push(e)}countIndent(e,r=0,s=0){for(let n=r;n=e.stack[r.depth+1].value+r.baseIndent)return!0;if(r.indent>=r.baseIndent+4)return!1;let s=(t.type==f.OrderedList?ee:W)(r,e,!1);return s>0&&(t.type!=f.BulletList||Y(r,e,!1)<0)&&r.text.charCodeAt(r.pos+s-1)==t.value}const ge={[f.Blockquote](t,e,r){return r.next!=62?!1:(r.markers.push(m(f.QuoteMark,e.lineStart+r.pos,e.lineStart+r.pos+1)),r.moveBase(r.pos+(C(r.text.charCodeAt(r.pos+1))?2:1)),t.end=e.lineStart+r.text.length,!0)},[f.ListItem](t,e,r){return r.indent-1?!1:(r.moveBaseColumn(r.baseIndent+t.value),!0)},[f.OrderedList]:ie,[f.BulletList]:ie,[f.Document](){return!0}};function C(t){return t==32||t==9||t==10||t==13}function 
N(t,e=0){for(;er&&C(t.charCodeAt(e-1));)e--;return e}function ke(t){if(t.next!=96&&t.next!=126)return-1;let e=t.pos+1;for(;e-1&&t.depth==e.stack.length||s<3?-1:1}function be(t,e){for(let r=t.stack.length-1;r>=0;r--)if(t.stack[r].type==e)return!0;return!1}function W(t,e,r){return(t.next==45||t.next==43||t.next==42)&&(t.pos==t.text.length-1||C(t.text.charCodeAt(t.pos+1)))&&(!r||be(e,f.BulletList)||t.skipSpace(t.pos+2)=48&&n<=57;){s++;if(s==t.text.length)return-1;n=t.text.charCodeAt(s)}return s==t.pos||s>t.pos+9||n!=46&&n!=41||st.pos+1||t.next!=49)?-1:s+1-t.pos}function Se(t){if(t.next!=35)return-1;let e=t.pos+1;for(;e6?-1:r}function we(t){if(t.next!=45&&t.next!=61||t.indent>=t.baseIndent+4)return-1;let e=t.pos+1;for(;e/,Ae=/\?>/,Z=[[/^<(?:script|pre|style)(?:\s|>|$)/i,/<\/(?:script|pre|style)>/i],[/^\s*/i.exec(s);if(i)return t.append(m(f.Comment,r,r+1+i[0].length));let o=/^\?[^]*?\?>/.exec(s);if(o)return t.append(m(f.ProcessingInstruction,r,r+1+o[0].length));let a=/^(?:![A-Z][^]*?>|!\[CDATA\[[^]*?\]\]>|\/\s*[a-zA-Z][\w-]*\s*>|\s*[a-zA-Z][\w-]*(\s+[a-zA-Z:_][\w-.:]*(?:\s*=\s*(?:[^\s"'=<>`]+|'[^']*'|"[^"]*"))?)*\s*(\/\s*)?>)/.exec(s);return a?t.append(m(f.HTMLTag,r,r+1+a[0].length)):-1},Emphasis(t,e,r){if(e!=95&&e!=42)return-1;let s=r+1;for(;t.char(s)==e;)s++;let n=t.slice(r-1,r),i=t.slice(s,s+1),o=R.test(n),a=R.test(i),l=/\s|^$/.test(n),h=/\s|^$/.test(i),u=!h&&(!a||l||o),p=!l&&(!o||h||a),d=u&&(e==42||!p||o),L=p&&(e==42||!u||a);return t.append(new A(e==95?He:Pe,r,s,(d?1:0)|(L?2:0)))},HardBreak(t,e,r){if(e==92&&t.char(r+1)==10)return t.append(m(f.HardBreak,r,r+2));if(e==32){let s=r+1;for(;t.char(s)==32;)s++;if(t.char(s)==10&&s>=r+2)return t.append(m(f.HardBreak,r,s+1))}return-1},Link(t,e,r){return e==91?t.append(new A(P,r,r+1,1)):-1},Image(t,e,r){return e==33&&t.char(r+1)==91?t.append(new A(le,r,r+2,1)):-1},LinkEnd(t,e,r){if(e!=93)return-1;for(let s=t.parts.length-1;s>=0;s--){let n=t.parts[s];if(n instanceof A&&(n.type==P||n.type==le)){if(!n.side||t.skipSpace(n.to)==r&&!/[(\[]/.test(t.slice(r+1,r+2)))return t.parts[s]=null,-1;let i=t.takeContent(s),o=t.parts[s]=ut(t,i,n.type==P?f.Link:f.Image,n.from,r+1);if(n.type==P)for(let a=0;ae?m(f.URL,e+r,i+r):i==t.length?null:!1}}function Ne(t,e,r){let s=t.charCodeAt(e);if(s!=39&&s!=34&&s!=40)return!1;let n=s==40?41:s;for(let i=e+1,o=!1;i=this.end?-1:this.text.charCodeAt(e-this.offset)}get end(){return this.offset+this.text.length}slice(e,r){return this.text.slice(e-this.offset,r-this.offset)}append(e){return this.parts.push(e),e.to}addDelimiter(e,r,s,n,i){return this.append(new A(e,r,s,(n?1:0)|(i?2:0)))}addElement(e){return this.append(e)}resolveMarkers(e){for(let s=e;s=e;l--){let g=this.parts[l];if(g instanceof A&&g.side&1&&g.type==n.type&&!(i&&(n.side&1||g.side&2)&&(g.to-g.from+o)%3==0&&((g.to-g.from)%3||o%3))){a=g;break}}if(!a)continue;let h=n.type.resolve,u=[],p=a.from,d=n.to;if(i){let g=Math.min(2,a.to-a.from,o);p=a.to-g,d=n.from+g,h=g==1?"Emphasis":"StrongEmphasis"}a.type.mark&&u.push(this.elt(a.type.mark,p,a.to));for(let g=l+1;g=0;r--){let s=this.parts[r];if(s instanceof A&&s.type==e)return r}return null}takeContent(e){let r=this.resolveMarkers(e);return this.parts.length=e,r}skipSpace(e){return N(this.text,e-this.offset)+this.offset}elt(e,r,s,n){return typeof e=="string"?m(this.parser.getNodeType(e),r,s,n):new Me(e,r)}}function V(t,e){if(!e.length)return t;if(!t.length)return e;let r=t.slice(),s=0;for(let n of e){for(;s(e?e-1:0))return!1;if(this.fragmentEnd<0){let i=this.fragment.to;for(;i>0&&this.input.read(i-1,i)!=` 
-`;)i--;this.fragmentEnd=i?i-1:0}let s=this.cursor;s||(s=this.cursor=this.fragment.tree.cursor(),s.firstChild());let n=e+this.fragment.offset;for(;s.to<=n;)if(!s.parent())return!1;for(;;){if(s.from>=n)return this.fragment.from<=r;if(!s.childAfter(n))return!1}}matches(e){let r=this.cursor.tree;return r&&r.prop(I.contextHash)==e}takeNodes(e){let r=this.cursor,s=this.fragment.offset,n=this.fragmentEnd-(this.fragment.openEnd?1:0),i=e.absoluteLineStart,o=i,a=e.block.children.length,l=o,h=a;for(;;){if(r.to-s>n){if(r.type.isAnonymous&&r.firstChild())continue;break}if(e.dontInject.add(r.tree),e.addNode(r.tree,r.from-s),r.type.is("Block")&&(pt.indexOf(r.type.id)<0?(o=r.to-s,a=e.block.children.length):(o=l,a=h,l=r.to-s,h=e.block.children.length)),!r.nextSibling())break}for(;e.block.children.length>a;)e.block.children.pop(),e.block.positions.pop();return o-i}}const mt=ce({"Blockquote/...":c.quote,HorizontalRule:c.contentSeparator,"ATXHeading1/... SetextHeading1/...":c.heading1,"ATXHeading2/... SetextHeading2/...":c.heading2,"ATXHeading3/...":c.heading3,"ATXHeading4/...":c.heading4,"ATXHeading5/...":c.heading5,"ATXHeading6/...":c.heading6,"Comment CommentBlock":c.comment,Escape:c.escape,Entity:c.character,"Emphasis/...":c.emphasis,"StrongEmphasis/...":c.strong,"Link/... Image/...":c.link,"OrderedList/... BulletList/...":c.list,"BlockQuote/...":c.quote,"InlineCode CodeText":c.monospace,URL:c.url,"HeaderMark HardBreak QuoteMark ListMark LinkMark EmphasisMark CodeMark":c.processingInstruction,"CodeInfo LinkLabel":c.labelName,LinkTitle:c.string,Paragraph:c.content}),gt=new j(new me(Ee).extend(mt),Object.keys(z).map(t=>z[t]),Object.keys(z).map(t=>at[t]),Object.keys(z),lt,ge,Object.keys(_).map(t=>_[t]),Object.keys(_),[]);function kt(t,e,r){let s=[];for(let n=t.firstChild,i=e;;n=n.nextSibling){let o=n?n.from:r;if(o>i&&s.push({from:i,to:o}),!n)break;i=n.to}return s}function Lt(t){let{codeParser:e,htmlParser:r}=t;return{wrap:Qe((n,i)=>{let o=n.type.id;if(e&&(o==f.CodeBlock||o==f.FencedCode)){let a="";if(o==f.FencedCode){let h=n.node.getChild(f.CodeInfo);h&&(a=i.read(h.from,h.to))}let l=e(a);if(l)return{parser:l,overlay:h=>h.type.id==f.CodeText}}else if(r&&(o==f.HTMLBlock||o==f.HTMLTag))return{parser:r,overlay:kt(n.node,n.from,n.to)};return null})}}const bt={resolve:"Strikethrough",mark:"StrikethroughMark"},St={defineNodes:[{name:"Strikethrough",style:{"Strikethrough/...":c.strikethrough}},{name:"StrikethroughMark",style:c.processingInstruction}],parseInline:[{name:"Strikethrough",parse(t,e,r){if(e!=126||t.char(r+1)!=126||t.char(r+2)==126)return-1;let s=t.slice(r-1,r),n=t.slice(r+2,r+3),i=/\s|^$/.test(s),o=/\s|^$/.test(n),a=R.test(s),l=R.test(n);return t.addDelimiter(bt,r,r+2,!o&&(!l||i||a),!i&&(!a||o||l))},after:"Emphasis"}]};function y(t,e,r=0,s,n=0){let i=0,o=!0,a=-1,l=-1,h=!1,u=()=>{s.push(t.elt("TableCell",n+a,n+l,t.parser.parseInline(e.slice(a,l),n+a)))};for(let p=r;p-1)&&i++,o=!1,s&&(a>-1&&u(),s.push(t.elt("TableDelimiter",p+n,p+n+1))),a=l=-1):(h||d!=32&&d!=9)&&(a<0&&(a=p),l=p+1),h=!h&&d==92}return a>-1&&(i++,s&&u()),i}function fe(t,e){for(let r=e;rn instanceof ue)||!fe(e.text,e.basePos))return!1;let s=t.scanLine(t.absoluteLineEnd+1).text;return Oe.test(s)&&y(t,e.text,e.basePos)==y(t,s,e.basePos)},before:"SetextHeading"}]};class Ct{nextLine(){return!1}finish(e,r){return e.addLeafElement(r,e.elt("Task",r.start,r.start+r.content.length,[e.elt("TaskMarker",r.start,r.start+3),...e.parser.parseInline(r.content.slice(3),r.start+3)])),!0}}const 
At={defineNodes:[{name:"Task",block:!0,style:c.list},{name:"TaskMarker",style:c.atom}],parseBlock:[{name:"TaskList",leaf(t,e){return/^\[[ xX]\]/.test(e.content)&&t.parentType().name=="ListItem"?new Ct:null},after:"SetextHeading"}]},xt=[wt,At,St];function Re(t,e,r){return(s,n,i)=>{if(n!=t||s.char(i+1)==t)return-1;let o=[s.elt(r,i,i+1)];for(let a=i+1;a"}}),Te=new I,De=gt.configure({props:[Je.add(t=>!t.is("Block")||t.is("Document")||K(t)!=null?void 0:(e,r)=>({from:r.doc.lineAt(e.from).to,to:e.to})),Te.add(K),Ye.add({Document:()=>null}),We.add({Document:ze})]});function K(t){let e=/^(?:ATX|Setext)Heading(\d)$/.exec(t.name);return e?+e[1]:void 0}function Mt(t,e){let r=t;for(;;){let s=r.nextSibling,n;if(!s||(n=K(s.type))!=null&&n<=e)break;r=s}return r.to}const Ht=et.of((t,e,r)=>{for(let s=J(t).resolveInner(r,-1);s&&!(s.fromr)return{from:r,to:i}}return null});function te(t){return new Ve(ze,t,[Ht],"markdown")}const Pt=te(De),vt=De.configure([xt,Et,Bt,It]),Xe=te(vt);function Nt(t,e){return r=>{if(r&&t){let s=null;if(r=/\S*/.exec(r)[0],typeof t=="function"?s=t(r):s=ne.matchLanguageName(t,r,!0),s instanceof ne)return s.support?s.support.language.parser:tt.getSkippingParser(s.load());if(s)return s.parser}return e?e.parser:null}}class D{constructor(e,r,s,n,i,o,a){this.node=e,this.from=r,this.to=s,this.spaceBefore=n,this.spaceAfter=i,this.type=o,this.item=a}blank(e,r=!0){let s=this.spaceBefore+(this.node.name=="Blockquote"?">":"");if(e!=null){for(;s.length0;n--)s+=" ";return s+(r?this.spaceAfter:"")}}marker(e,r){let s=this.node.name=="OrderedList"?String(+je(this.item,e)[2]+r):"";return this.spaceBefore+s+this.type+this.spaceAfter}}function Fe(t,e){let r=[];for(let n=t;n&&n.name!="Document";n=n.parent)(n.name=="ListItem"||n.name=="Blockquote"||n.name=="FencedCode")&&r.push(n);let s=[];for(let n=r.length-1;n>=0;n--){let i=r[n],o,a=e.lineAt(i.from),l=i.from-a.from;if(i.name=="FencedCode")s.push(new D(i,l,l,"","","",null));else if(i.name=="Blockquote"&&(o=/^[ \t]*>( ?)/.exec(a.text.slice(l))))s.push(new D(i,l,l+o[0].length,"",o[1],">",null));else if(i.name=="ListItem"&&i.parent.name=="OrderedList"&&(o=/^([ \t]*)\d+([.)])([ \t]*)/.exec(a.text.slice(l)))){let h=o[3],u=o[0].length;h.length>=4&&(h=h.slice(0,h.length-4),u-=4),s.push(new D(i.parent,l,l+u,o[1],h,o[2],i))}else if(i.name=="ListItem"&&i.parent.name=="BulletList"&&(o=/^([ \t]*)([-+*])([ \t]{1,4}\[[ xX]\])?([ \t]+)/.exec(a.text.slice(l)))){let h=o[4],u=o[0].length;h.length>4&&(h=h.slice(0,h.length-4),u-=4);let p=o[2];o[3]&&(p+=o[3].replace(/[xX]/," ")),s.push(new D(i.parent,l,l+u,o[1],h,p,i))}}return s}function je(t,e){return/^(\s*)(\d+)(?=[.)])/.exec(e.sliceString(t.from,t.from+10))}function U(t,e,r,s=0){for(let n=-1,i=t;;){if(i.name=="ListItem"){let a=je(i,e),l=+a[2];if(n>=0){if(l!=n+1)return;r.push({from:i.from+a[1].length,to:i.from+a[0].length,insert:String(n+2+s)})}n=l}let o=i.nextSibling;if(!o)break;i=o}}const yt=({state:t,dispatch:e})=>{let r=J(t),{doc:s}=t,n=null,i=t.changeByRange(o=>{if(!o.empty||!Xe.isActiveAt(t,o.from))return n={range:o};let a=o.from,l=s.lineAt(a),h=Fe(r.resolveInner(a,-1),s);for(;h.length&&h[h.length-1].from>a-l.from;)h.pop();if(!h.length)return n={range:o};let u=h[h.length-1];if(u.to-u.spaceAfter.length>a-l.from)return n={range:o};let p=a>=u.to-u.spaceAfter.length&&!/\S/.test(l.text.slice(u.to));if(u.item&&p)if(u.node.firstChild.to>=a||l.from>0&&!/[^\s>]/.test(s.lineAt(l.from-1).text)){let k=h.length>1?h[h.length-2]:null,b,w="";k&&k.item?(b=l.from+k.from,w=k.marker(s,1)):b=l.from+(k?k.to:0);let 
x=[{from:b,to:a,insert:w}];return u.node.name=="OrderedList"&&U(u.item,s,x,-2),k&&k.node.name=="OrderedList"&&U(k.item,s,x),{range:v.cursor(b+w.length),changes:x}}else{let k="";for(let b=0,w=h.length-2;b<=w;b++)k+=h[b].blank(b\s*$/.exec(k.text);if(b&&b.index==u.from){let w=t.changes([{from:k.from+b.index,to:k.to},{from:l.from+u.from,to:l.to}]);return{range:o.map(w),changes:w}}}let d=[];u.node.name=="OrderedList"&&U(u.item,s,d);let L=u.item&&u.item.from]*/.exec(l.text)[0].length>=u.to)for(let k=0,b=h.length-1;k<=b;k++)S+=k==b&&!L?h[k].marker(s,1):h[k].blank(kl.from&&/\s/.test(l.text.charAt(g-l.from-1));)g--;return S=t.lineBreak+S,d.push({from:g,to:a,insert:S}),{range:v.cursor(g+S.length),changes:d}});return n?!1:(e(t.update(i,{scrollIntoView:!0,userEvent:"input"})),!0)};function de(t){return t.name=="QuoteMark"||t.name=="ListMark"}function Ot(t,e){let r=t.resolveInner(e,-1),s=e;de(r)&&(s=r.from,r=r.parent);for(let n;n=r.childBefore(s);)if(de(n))s=n.from;else if(n.name=="OrderedList"||n.name=="BulletList")r=n.lastChild,s=r.to;else break;return r}const Rt=({state:t,dispatch:e})=>{let r=J(t),s=null,n=t.changeByRange(i=>{let o=i.from,{doc:a}=t;if(i.empty&&Xe.isActiveAt(t,i.from)){let l=a.lineAt(o),h=Fe(Ot(r,o),a);if(h.length){let u=h[h.length-1],p=u.to-u.spaceAfter.length+(u.spaceAfter?1:0);if(o-l.from>p&&!/\S/.test(l.text.slice(p,o-l.from)))return{range:v.cursor(l.from+p),changes:{from:l.from+p,to:o}};if(o-l.from==p){let d=l.from+u.from;if(u.item&&u.node.fromEl Libro Rojo De Las Matematicas: Un Texto de Referencia para Estudiantes de Ingeniería -

El Libro Rojo De Las Matematicas is a book written by professor Moises Villena that covers the fundamental topics of basic mathematics for engineering students. The book is divided into 29 chapters, ranging from the real numbers to differential equations, and passing through linear algebra, differential and integral calculus, special functions, Fourier series, and Laplace transforms. Each chapter contains a theoretical introduction, worked examples, proposed exercises, and solutions.

      -

      El Libro Rojo De Las Matematicas Moises Villena Pdf 29


Download: https://bytlly.com/2uGwk8



      -

El Libro Rojo De Las Matematicas is a free digital publication that can be downloaded in pdf format from the Biblioteca Digital Espol[^3^]. The book has 575 pages and is written in Spanish. The author, Moises Villena, is a mathematics professor at the Escuela Superior Politécnica del Litoral (ESPOL) in Ecuador, and he is also known for his YouTube channel "Matemáticas con Moisés", where he explains a wide range of mathematics topics in a didactic and engaging way.

      -

El Libro Rojo De Las Matematicas is a reference text for engineering students who want to reinforce their knowledge of mathematics or prepare for exams such as ESPOL's pre-polytechnic admission exam. The book offers a clear and rigorous exposition of mathematical concepts, along with a wide variety of exercises for practicing and improving calculation and reasoning skills. It is also useful for mathematics teachers looking for up-to-date teaching material for their classes.

      - -

El Libro Rojo De Las Matematicas is not only a textbook but also a work that invites reflection on, and enjoyment of, mathematics. The author includes quotes, anecdotes, and curiosities about the history and philosophy of mathematics, as well as problems and challenges that stimulate critical and creative thinking. The book also shows the beauty and usefulness of mathematics in many areas of science, technology, and everyday life.

      -

El Libro Rojo De Las Matematicas is, in short, a book worth reading and consulting, both for students and for mathematics enthusiasts. It is a testament to professor Moises Villena's talent and passion for teaching and popularizing mathematics in an accessible and entertaining way. It is also a valuable resource for self-directed learning and for the professional development of future engineers.

      -

      - -

To conclude, El Libro Rojo De Las Matematicas is a book that deserves to be read and recommended by everyone who wants to learn or teach mathematics effectively and enjoyably. It is excellent supporting material for engineering students who want to master basic mathematics and apply it to their careers. It is also a source of inspiration and motivation for mathematics teachers who want to innovate and improve their teaching methods. It is, finally, a work that celebrates and shares the pleasure and power of mathematics.

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/lixq/bingo61/src/lib/bots/bing/sr.ts b/spaces/lixq/bingo61/src/lib/bots/bing/sr.ts deleted file mode 100644 index 7cae14da7362bd6cc1e234851c11ca67e5a99f0c..0000000000000000000000000000000000000000 --- a/spaces/lixq/bingo61/src/lib/bots/bing/sr.ts +++ /dev/null @@ -1,106 +0,0 @@ -// @ts-ignore -const SpeechRecognitionPolyfill: typeof webkitSpeechRecognition = typeof window !== 'undefined' ? ( - // @ts-ignore - window.SpeechRecognition || - window.webkitSpeechRecognition || - // @ts-ignore - window.mozSpeechRecognition || - // @ts-ignore - window.msSpeechRecognition || - // @ts-ignore - window.oSpeechRecognition -) as typeof webkitSpeechRecognition : undefined - -type subscriber = (msg: string, command?: string) => void - -export class SR { - recognition?: SpeechRecognition - onchange?: subscriber - transcript: boolean = false - listening: boolean = false - private commandsRe?: RegExp - constructor(commands: string[]) { - this.recognition = SpeechRecognitionPolyfill ? new SpeechRecognitionPolyfill() : undefined - if (!this.recognition) { - return - } - this.configuration('zh-CN') - if (commands.length) { - this.commandsRe = new RegExp(`^(${commands.join('|')})。?$`) - } - this.recognition.onresult = this.speechRecognition - this.recognition.onerror = (err) => { - console.log('err', err.error) - this.stop() - } - this.recognition.onend = () => { - if (this.recognition && this.listening) { - this.recognition.start() - } - } - } - - speechRecognition = (event: SpeechRecognitionEvent) => { - if (!this.listening) return - for (var i = event.resultIndex; i < event.results.length; i++) { - let result = event.results[i] - if (result.isFinal) { - var alt = result[0] - const text = alt.transcript.trim() - if (this.commandsRe && this.commandsRe.test(text)) { - return this.onchange?.('', RegExp.$1) - } - if (!this.transcript) return - this.onchange?.(text) - } - } - } - - private configuration = async (lang: string = 'zh-CN') => { - return new Promise((resolve) => { - if (this.recognition) { - this.recognition.continuous = true - this.recognition.lang = lang - this.recognition.onstart = resolve - } - }) - } - - start = async () => { - if (this.recognition && !this.listening) { - await this.recognition.start() - this.transcript = true - this.listening = true - } - } - - stop = () => { - if (this.recognition) { - this.recognition.stop() - this.transcript = false - this.listening = false - } - } - - - pause = () => { - if (this.recognition) { - this.transcript = false - } - } - - resume = () => { - if (this.recognition) { - this.transcript = true - } - } - - abort = () => { - if (this.recognition && this.transcript) { - this.recognition.abort() - this.transcript = false - this.listening = false - } - } -} - diff --git a/spaces/lpnguyen/continuous-discrete-time/README.md b/spaces/lpnguyen/continuous-discrete-time/README.md deleted file mode 100644 index d32e6d06b8f6e9e9b07fb938491cd791277fbe61..0000000000000000000000000000000000000000 --- a/spaces/lpnguyen/continuous-discrete-time/README.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Continuous Discrete Time -emoji: 📚 -colorFrom: red -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Continuous vs discrete time model -This demo shows the difference between continuous and discrete time model, using the simple exponential growth model. 
- - -The demo is used in the course "Population and Evolutionary Dynamics" (SBL.20032) at the Université de Fribourg, Switzerland. \ No newline at end of file diff --git a/spaces/ltgoslo/ssa-perin/data/parser/json_parser.py b/spaces/ltgoslo/ssa-perin/data/parser/json_parser.py deleted file mode 100644 index 504105bd77c69e5ae9ed3bfb27b641c381ccda01..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/data/parser/json_parser.py +++ /dev/null @@ -1,35 +0,0 @@ -from functools import reduce -from data.field.mini_torchtext.example import Example - - -def example_from_json(obj, fields): - ex = Example() - for key, vals in fields.items(): - if vals is not None: - if not isinstance(vals, list): - vals = [vals] - for val in vals: - # for processing the key likes 'foo.bar' - name, field = val - ks = key.split(".") - - def reducer(obj, key): - if isinstance(obj, list): - results = [] - for data in obj: - if key not in data: - # key error - raise ValueError("Specified key {} was not found in " "the input data".format(key)) - else: - results.append(data[key]) - return results - else: - # key error - if key not in obj: - raise ValueError("Specified key {} was not found in " "the input data".format(key)) - else: - return obj[key] - - v = reduce(reducer, ks, obj) - setattr(ex, name, field.preprocess(v)) - return ex diff --git a/spaces/ma-xu/LIVE/thrust/dependencies/cub/cmake/CubHeaderTesting.cmake b/spaces/ma-xu/LIVE/thrust/dependencies/cub/cmake/CubHeaderTesting.cmake deleted file mode 100644 index 45f20ce5f3b130a76e00bdbacfd7fc00784ba758..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/dependencies/cub/cmake/CubHeaderTesting.cmake +++ /dev/null @@ -1,29 +0,0 @@ -# For every public header, build a translation unit containing `#include
      ` -# to let the compiler try to figure out warnings in that header if it is not otherwise -# included in tests, and also to verify if the headers are modular enough. -# .inl files are not globbed for, because they are not supposed to be used as public -# entrypoints. - -file(GLOB_RECURSE headers - RELATIVE "${CUB_SOURCE_DIR}/cub" - CONFIGURE_DEPENDS - cub/*.cuh -) - -set(headertest_srcs) -foreach (header IN LISTS headers) - set(headertest_src "headers/${header}.cu") - configure_file("${CUB_SOURCE_DIR}/cmake/header_test.in" "${headertest_src}") - list(APPEND headertest_srcs "${headertest_src}") -endforeach() - -foreach(cub_target IN LISTS CUB_TARGETS) - cub_get_target_property(config_prefix ${cub_target} PREFIX) - - set(headertest_target ${config_prefix}.headers) - add_library(${headertest_target} OBJECT ${headertest_srcs}) - target_link_libraries(${headertest_target} PUBLIC ${cub_target}) - cub_clone_target_properties(${headertest_target} ${cub_target}) - - add_dependencies(${config_prefix}.all ${headertest_target}) -endforeach() diff --git a/spaces/maknee/minigpt4.cpp/app.py b/spaces/maknee/minigpt4.cpp/app.py deleted file mode 100644 index 5a58d3e6e8c8c70d00fbbaea8b5a24fd46f5816d..0000000000000000000000000000000000000000 --- a/spaces/maknee/minigpt4.cpp/app.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import sys -import ctypes -import pathlib -from typing import Optional, List -import enum -from pathlib import Path -import argparse -import gradio as gr - -import minigpt4_library - -from huggingface_hub import hf_hub_download - -model_path = hf_hub_download(repo_id='maknee/minigpt4-13b-ggml', filename='minigpt4-13B-f16.bin', repo_type='dataset') -llm_model_path = hf_hub_download(repo_id='maknee/ggml-vicuna-v0-quantized', filename='ggml-vicuna-13B-v0-q5_k.bin', repo_type='dataset') - -title = """

      MiniGPT-4.cpp Demo

      """ -description = """

      This is the demo of MiniGPT-4 with ggml (cpu only!). Upload your images and start chatting!

      """ -article = """
      """ - -global minigpt4_chatbot -minigpt4_chatbot: minigpt4_library.MiniGPT4ChatBot - -def user(message, history): - history = history or [] - # Append the user's message to the conversation history - history.append([message, ""]) - return "", history - -def chat(history, limit: int = 1024, temp: float = 0.8, top_k: int = 40, top_p: float = 0.9, repeat_penalty: float = 1.1): - history = history or [] - - message = history[-1][0] - - history[-1][1] = "" - for output in minigpt4_chatbot.generate( - message, - limit = int(limit), - temp = float(temp), - top_k = int(top_k), - top_p = float(top_p), - ): - answer = output - history[-1][1] += answer - # stream the response - yield history, history - -def clear_state(history, chat_message, image): - history = [] - minigpt4_chatbot.reset_chat() - return history, gr.update(value=None, interactive=True), gr.update(placeholder='Upload image first', interactive=False), gr.update(value="Upload & Start Chat", interactive=True) - -def upload_image(image, history): - if image is None: - return None, None, gr.update(interactive=True), history - history = [] - minigpt4_chatbot.upload_image(image.convert('RGB')) - return gr.update(interactive=False), gr.update(interactive=True, placeholder='Type and press Enter'), gr.update(value="Start Chatting", interactive=False), history - -def start(): - with gr.Blocks() as demo: - gr.Markdown(title) - gr.Markdown(description) - gr.Markdown(article) - - with gr.Row(): - with gr.Column(scale=0.5): - image = gr.Image(type="pil") - upload_button = gr.Button(value="Upload & Start Chat", interactive=True, variant="primary") - - max_tokens = gr.Slider(1, 1024, label="Max Tokens", step=1, value=128) - temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.05, value=0.8) - top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95) - top_k = gr.Slider(0, 100, label="Top K", step=1, value=40) - repeat_penalty = gr.Slider(0.0, 2.0, label="Repetition Penalty", step=0.1, value=1.1) - - with gr.Column(): - chatbot = gr.Chatbot(label='MiniGPT-4') - message = gr.Textbox(label='User', placeholder='Upload image first', interactive=False) - history = gr.State() - - with gr.Row(): - submit = gr.Button(value="Send message", variant="secondary").style(full_width=True) - clear = gr.Button(value="Reset", variant="secondary").style(full_width=False) - # stop = gr.Button(value="Stop", variant="secondary").style(full_width=False) - - clear.click(clear_state, inputs=[history, image, message], outputs=[history, image, message, upload_button], queue=False) - - upload_button.click(upload_image, inputs=[image, history], outputs=[image, message, upload_button, history]) - - submit_click_event = submit.click( - fn=user, inputs=[message, history], outputs=[message, history], queue=True - ).then( - fn=chat, inputs=[history, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, history], queue=True - ) - message_submit_event = message.submit( - fn=user, inputs=[message, history], outputs=[message, history], queue=True - ).then( - fn=chat, inputs=[history, max_tokens, temperature, top_p, top_k, repeat_penalty], outputs=[chatbot, history], queue=True - ) - # stop.click(fn=None, inputs=None, outputs=None, cancels=[submit_click_event, message_submit_event], queue=False) - - demo.launch(enable_queue=True) - -minigpt4_chatbot = minigpt4_library.MiniGPT4ChatBot(model_path, llm_model_path, verbosity=minigpt4_library.Verbosity.SILENT) -start() diff --git 
a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/options/train_options.py b/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/options/train_options.py deleted file mode 100644 index 6cc3296657043568a3a961d793f2c69f568bab1a..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/Image_Restoration_Colorization/Global/options/train_options.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -from .base_options import BaseOptions - -class TrainOptions(BaseOptions): - def initialize(self): - BaseOptions.initialize(self) - # for displays - self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen') - self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') - self.parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results') - self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs') - self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') - self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration') - - # for training - self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') - # self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location') - self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') - self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') - self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') - self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') - self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') - self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') - self.parser.add_argument('--training_dataset',type=str,default='',help='training use which dataset') - - # for discriminators - self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') - self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') - self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') - self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') - self.parser.add_argument('--l2_feat', type=float, help='weight for feature mapping loss') - self.parser.add_argument('--use_l1_feat', action='store_true', help='use l1 for feat mapping') - self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') - self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') - self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') - self.parser.add_argument('--gan_type', type=str, default='lsgan', help='Choose the loss 
type of GAN') - self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images') - self.parser.add_argument('--norm_D',type=str, default='spectralinstance', help='instance normalization or batch normalization') - self.parser.add_argument('--init_D',type=str,default='xavier',help='normal|xavier|xavier_uniform|kaiming|orthogonal|none') - - self.parser.add_argument('--no_TTUR',action='store_true',help='No TTUR') - - self.parser.add_argument('--start_epoch',type=int,default=-1,help='write the start_epoch of iter.txt into this parameter') - self.parser.add_argument('--no_degradation',action='store_true',help='when train the mapping, enable this parameter --> no degradation will be added into clean image') - self.parser.add_argument('--no_load_VAE',action='store_true',help='when train the mapping, enable this parameter --> random initialize the encoder an decoder') - self.parser.add_argument('--use_v2_degradation',action='store_true',help='enable this parameter --> 4 kinds of degradations will be used to synthesize corruption') - self.parser.add_argument('--use_vae_which_epoch',type=str,default='200') - - - self.parser.add_argument('--use_focal_loss',action='store_true') - - self.parser.add_argument('--mask_need_scale',action='store_true',help='enable this param means that the pixel range of mask is 0-255') - self.parser.add_argument('--positive_weight',type=float,default=1.0,help='(For scratch detection) Since the scratch number is less, and we use a weight strategy. This parameter means that we want to decrease the weight.') - - self.parser.add_argument('--no_update_lr',action='store_true',help='use this means we do not update the LR while training') - - - self.isTrain = True diff --git a/spaces/mascIT/AgeGuesser/yolov5/detect.py b/spaces/mascIT/AgeGuesser/yolov5/detect.py deleted file mode 100644 index 1eb19ef0e46fc45477d4aa0f6ef3ce5bbdafa8fe..0000000000000000000000000000000000000000 --- a/spaces/mascIT/AgeGuesser/yolov5/detect.py +++ /dev/null @@ -1,138 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license - -import os -import sys -from pathlib import Path -import cv2 - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[0] # YOLOv5 root directory -if str(ROOT) not in sys.path: - sys.path.append(str(ROOT)) # add ROOT to PATH - - -ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative - -import torch -from yolov5.utils.torch_utils import select_device, time_sync -from yolov5.utils.plots import Annotator, colors, save_one_box -from yolov5.utils.general import (check_img_size, - increment_path, non_max_suppression, scale_coords, xyxy2xywh) -from yolov5.utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, pil_to_cv -from yolov5.models.common import DetectMultiBackend -import torchvision -import numpy as np - -test_transforms = torchvision.transforms.Compose([ - torchvision.transforms.ToPILImage(), - torchvision.transforms.transforms.ToTensor(), - torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), - torchvision.transforms.Resize((224, 224)), -]) - - -test_random_transforms = torchvision.transforms.Compose([ - torchvision.transforms.ToPILImage(), - torchvision.transforms.transforms.ToTensor(), - torchvision.transforms.RandomRotation((-15, 15)), - torchvision.transforms.RandomGrayscale(p=0.4), - torchvision.transforms.RandomPerspective(0.4, p=0.4), - torchvision.transforms.RandomAdjustSharpness(2), - torchvision.transforms.RandomAffine(degrees=0, translate=None, scale=(0.9, 1.0)), - 
torchvision.transforms.RandomHorizontalFlip(), - torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), - torchvision.transforms.Resize((224, 224)), -]) - -def load_yolo_model(weights, device="cpu", imgsz=[1280, 1280]): - # Load model - device = select_device(device) - model = DetectMultiBackend(weights, device=device, dnn=False, data=ROOT / 'data/coco128.yaml') - stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine - imgsz = check_img_size(imgsz, s=stride) # check image size - - half = False - # Half - half &= (pt or jit or onnx or engine) and device.type != 'cpu' # FP16 supported on limited backends with CUDA - if pt or jit: - model.model.half() if half else model.model.float() - model.warmup(imgsz=(1, 3, *imgsz), half=half) - - return model, stride, names, pt, jit, onnx, engine - - -def predict( - - age_model, - model, # model.pt path(s) - stride, - source=None, # PIL Image - imgsz=(640, 640), # inference size (height, width) - conf_thres=0.5, # confidence threshold - iou_thres=0.45, # NMS IOU threshold - max_det=1000, # maximum detections per image - device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu - classes=None, # filter by class: --class 0, or --class 0 2 3 - agnostic_nms=False, # class-agnostic NMS - augment=False, # augmented inference - visualize=False, # visualize features - half=False, # use FP16 half-precision inference - with_random_augs = False - ): - - im, im0 = pil_to_cv(source, img_size=imgsz[0], stride=stride) - - im = torch.from_numpy(im).to(device) - im = im.half() if half else im.float() # uint8 to fp16/32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - if len(im.shape) == 3: - im = im[None] # expand for batch dim - - # Inference - visualize = False - pred = model(im, augment=augment, visualize=visualize) - - # NMS - pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) - - # Process predictions - preds = [] - - for i, det in enumerate(pred): # per image - - # im0 = im0.copy() - - if len(det): - # Rescale boxes from img_size to im0 size - det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() - - for *xyxy, conf, _ in reversed(det): - - ages = [] - face = im0[int(xyxy[1]):int(xyxy[3]),int(xyxy[0]):int(xyxy[2])] - face_img = cv2.cvtColor(face, cv2.COLOR_BGR2RGB) - - # inference with original crop - im = test_transforms(face_img).unsqueeze_(0) - - with torch.no_grad(): - y = age_model(im) - - age = y[0].item() - ages.append(age) - - if with_random_augs: - # inference with random augmentations - for k in range(12): - im = test_random_transforms(face_img).unsqueeze_(0) - - with torch.no_grad(): - y = age_model(im) - - age = y[0].item() - - ages.append(age) - - preds.append({"class": str(int( np.mean(np.array(ages), axis=0))), "xmin": int(xyxy[0]), "ymin": int(xyxy[1]), "xmax": int(xyxy[2]),"ymax": int(xyxy[3]), "conf": float(conf)}) - - return preds \ No newline at end of file diff --git a/spaces/matthoffner/chatbot-mini/types/chat.ts b/spaces/matthoffner/chatbot-mini/types/chat.ts deleted file mode 100644 index 1233f2cbe347464ba4937d7a0272ea533ded116b..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/chatbot-mini/types/chat.ts +++ /dev/null @@ -1,26 +0,0 @@ -import { OpenAIModel } from './openai'; - -export interface Message { - role: Role; - content: string; -} - -export type Role = 'assistant' | 'user'; - -export interface ChatBody { - model: OpenAIModel; - messages: Message[]; - key: string; - prompt: string; - 
temperature: number; -} - -export interface Conversation { - id: string; - name: string; - messages: Message[]; - model: OpenAIModel; - prompt: string; - temperature: number; - folderId: string | null; -} diff --git a/spaces/merve/anonymization/source/anonymization/make-estimates.js b/spaces/merve/anonymization/source/anonymization/make-estimates.js deleted file mode 100644 index 46ed3feaf1acaccf35153c3ebaf5b60094b21daf..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/anonymization/make-estimates.js +++ /dev/null @@ -1,227 +0,0 @@ -window.makeEstimates = function(){ - var estimateScale = d3.scaleLinear() - .domain([.5 - .15, .5 + .15]).range([0, c.width]) - .interpolate(d3.interpolateRound) - - var jitterHeight = 90 - var rs = 4 // rect size - - var estimates = students[0].coinVals.map(d => ({val: .5, pctHead: .25, x: c.width/2, y: c.height - jitterHeight/2})) - var simulation = d3.forceSimulation(estimates) - .force('collide', d3.forceCollide(rs).strength(.1)) - .stop() - - function updateEstimates(){ - var selectedStudents = students.all.slice(0, sliders.population) - - selectedStudents[0].coinVals.map((_, i) => { - estimates[i].pctHead = d3.mean(selectedStudents, d => (d.coinVals[i] < sliders.headsProb) || d.plagerized) - - estimates[i].val = (1 - estimates[i].pctHead)/(1 - sliders.headsProb) - }) - updateSimulation(60) - } - updateEstimates() - - function updateSimulation(ticks=80, yStrength=.005){ - var variance = d3.variance(estimates, d => d.val) - var xStength = variance < .0005 ? .3 : .1 - - estimates.forEach(d => d.targetX = estimateScale(d.val)) - - simulation - .force('x', d3.forceX(d => d.targetX).strength(xStength)) - .force('y', d3.forceY(c.height - jitterHeight/2).strength(yStrength)) - .alpha(1) - // .alphaDecay(1 - Math.pow(0.001, 1/ticks)) - - for (var i = 0; i < ticks; ++i) simulation.tick() - - estimates.forEach(d => { - d.x = Math.round(d.x) - d.y = Math.round(d.y) - }) - } - updateSimulation(80, 1) - updateSimulation(80, .005) - - - // Set up DOM - var histogramSel = c.svg.append('g').translate([0, -25]) - var axisSel = histogramSel.append('g.axis.state.init-hidden') - var histogramAxis = axisSel.append('g') - - var numTicks = 6 - var xAxis = d3.axisTop(estimateScale).ticks(numTicks).tickFormat(d3.format('.0%')).tickSize(100) - - histogramAxis.call(xAxis).translate([.5, c.height + 5]) - middleTick = histogramAxis.selectAll('g').filter((d, i) => i === 3) - middleTick.select('text').classed('bold', 1) - middleTick.select('line').st({stroke: '#000'}) - - histogramAxis.append('text.bold') - .text('actual non-plagiarism rate') - .translate([c.width/2, 11]) - .st({fontSize: '10px'}) - - var containerSel = histogramSel.append('g#histogram').translate([0.5, .5]) - - - // Selection overlay to highlight individual estimates. 
- var selectSize = rs*2 + 2 - var selectColor = '#007276' - var rectFill = '#007276' - - var activeSel = histogramSel.append('g.active.init-hidden.axis') - .st({pointerEvents: 'none'}) - - activeSel.append('rect') - .at({width: selectSize, height: selectSize, stroke: selectColor, fill: 'none', strokeWidth: 3}) - .translate([-selectSize/2, -selectSize/2]) - - var activeTextHighlight = activeSel.append('rect') - .at({x: -32, width: 32*2, height: 18, y: -25, fill: 'rgba(255,255,255,.6)', rx: 10, ry: 10, xfill: 'red'}) - - var activeTextSel = activeSel.append('text.est-text.bold') - .text('34%') - .at({textAnchor: 'middle', textAnchor: 'middle', y: '-1em'}) - .st({fill: selectColor}) - - var activePathSel = activeSel.append('path') - .st({stroke: selectColor, strokeWidth: 3}) - - - // Update highlight DOM with current highlight - var curDrawData = {pctHead: .25, val: .5, x: c.width/2, y: c.height - jitterHeight/2} - function setActive(active, dur=0){ - if (active !== estimates.active){ - estimates.forEach(d => { - d.active = d == active - d.fy = d.active ? d.y : null - }) - estimates.active = active - } - - students.updateHeadsPos() - - - sel.flipCircle - .transition().duration(0).delay(d => d.i*5*(dur > 0 ? 1 : 0)) - .at({transform: d => slides && slides.curSlide && slides.curSlide.showFlipCircle && d.coinVals[active.index] < sliders.headsProb ? - 'scale(1)' : 'scale(.1)'}) - - - flipCoinTimer.stop() - if (dur){ - var objI = d3.interpolateObject(curDrawData, active) - - flipCoinTimer = d3.timer(ms => { - var t = d3.easeCubicInOut(d3.clamp(0, ms/dur, 1)) - drawData(objI(t)) - if (t == 1) flipCoinTimer.stop() - }) - } else{ - drawData(active) - } - - function drawData({pctHead, val, x, y}){ - activeSel.translate([x + rs/2, y + rs/2]) - activeTextSel.text('est. ' + d3.format('.1%')(val)) - activePathSel.at({d: `M ${selectSize/2*Math.sign(c.width/2 - x)} -1 H ${c.width/2 - x}`}) - - var error = Math.abs(val - .5) - var fmt = d3.format(".1%") - var pop = sliders.population - d3.select('.rand-text') - // .html(`${fmt(1 - pctHead)} of students said they had never plagerized. Since about half the students flipped heads and automatically reported plagerizism, we double that to estimate ${fmt(val)} of students haven't plagerized—${error > .1 ? '' : error > .07 ? 'a little ' : 'not '}far from the actual rate of ${fmt(.5)}`) - // .html(`${Math.round((1 - pctHead)*pop)} of ${pop} students said they had never plagiarized. Since about half the students flipped heads and automatically reported plagiarism, we double that rate to estimate ${fmt(val)} of students haven't plagiarized—${error > .4 ? '' : error > .07 ? 'a little ' : 'not '}far from the actual rate of ${fmt(.5)}`) - .html(`Here, ${fmt(1 - pctHead)} students said they had never plagiarized. Doubling that, we estimate ${fmt(val)} of students haven't plagiarized—${error > .1 ? 'quite ' : error > .07 ? 
'a little ' : 'not '}far from the actual rate of ${fmt(.5)}`) - - curDrawData = {pctHead, val, x, y} - } - } - window.flipCoinTimer = d3.timer(d => d) - - - - var estimateSel = containerSel.appendMany('rect.estimate', estimates) - .at({width: rs, height: rs, stroke: '#fff', fill: rectFill, strokeWidth: .5}) - .st({fill: rectFill}) - .translate([rs/2, rs/2]) - .on('mouseover', (d, i) => { - if (window.slides.curSlide.showHistogram) { - setActive(d) - } - }) - - function setSelectorOpacity(textOpacity, strokeOpacity) { - activeTextSel.st({opacity: textOpacity}) - activeSel.st({opacity: strokeOpacity}) - activePathSel.st({opacity: strokeOpacity}) - } - - function render(transition=false){ - estimateSel.translate(d => [d.x, d.y]) - setActive(estimates.active) - - if (transition){ - if (window.flipAllCoinsTimer) window.flipAllCoinsTimer.stop() - window.flipAllCoinsTimer = d3.timer(ms => { - var t = d3.easeExpIn(d3.clamp(0, ms/5000, 1), 20) - if (flipAllCoinsTimer.forceEnd) t = 1 - - if (t > .028) { - setSelectorOpacity(textOpacity=0, strokeOpacity=0.7) - } - - var index = Math.floor((estimates.length - 2)*t) + 1 - estimateSel.classed('active', (d, i) => i <= index) - - setActive(estimates[index]) - // flipCoinsSel.text('Flip coins ' + d3.format('03')(index < 100 ? index : index + 1) + ' times') - flipCoinsSel.text('Flip coins 200 times') - - if (t == 1) { - flipAllCoinsTimer.stop() - setSelectorOpacity(textOpacity=1, strokeOpacity=1) - } - }) - } else { - setSelectorOpacity(textOpacity=1, strokeOpacity=1) - flipCoinsSel - } - } - window.flipAllCoinsTimer = d3.timer(d => d) - - - var flipCoinsSel = d3.select('.flip-coins').on('click', () => { - students.all.forEach(student => { - student.coinVals = student.coinVals.map(j => Math.random()) - }) - - updateEstimates() - render(true) - }) - - d3.select('.flip-coins-once').on('click', flipCoin) - function flipCoin(){ - active = estimates[0] - - students.all.forEach(student => { - student.coinVals = student.coinVals.map(j => Math.random()) - }) - - active.fy = active.y = c.height - jitterHeight/2 - updateEstimates() - - estimateSel.translate(d => [d.x, d.y]) - estimates.active = null - setActive(active, 1000) - } - - Object.assign(estimates, {updateEstimates, setActive, render, flipCoin, axisSel, containerSel, estimateSel, activeSel}) - - return estimates -} - -if (window.init) window.init() \ No newline at end of file diff --git a/spaces/merve/anonymization/source/uncertainty-calibration/graph-scroll.css b/spaces/merve/anonymization/source/uncertainty-calibration/graph-scroll.css deleted file mode 100644 index 2090579822fcb774883d54187371bc4a3440a395..0000000000000000000000000000000000000000 --- a/spaces/merve/anonymization/source/uncertainty-calibration/graph-scroll.css +++ /dev/null @@ -1,129 +0,0 @@ -#container{ - position: relative; - width: auto; - } - - #sections{ - width: 340px; - } - - #sections > div{ - background: white; - opacity: .2; - margin-bottom: 200px; - line-height: 1.4em; - transition: opacity .2s; - } - #sections > div:first-child{ - opacity: 1; - } - #sections > div:last-child{ - /*padding-bottom: 80vh;*/ - padding-bottom: 80px; - margin-bottom: 0px; - } - #sections > div:first-child > h1{ - padding-top: 40px; - } - - #sections > div.graph-scroll-active{ - opacity: 1; - } - - #graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 800px; - font-family: sans-serif; - - } - - .slider{ - font-family: 'Google Sans', sans-serif; - } - - #sections h1{ - 
text-align: left !important; - } - - @media (max-width: 1000px) and (min-width: 926px){ - #sections{ - margin-left: 20px; - } - } - - @media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - h1{ - margin-bottom: 0px; - } - - - #graph{ - width: 100%; - margin-left: 10px; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > div{ - position: relative; - top: 0px; - } - #sections{ - width: auto; - position: relative; - margin: 0px auto; - pointer-events: none; - } - #sections a{ - pointer-events: all; - } - - #sections > div{ - background: rgba(255,255,255,.9); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - .mini, .slider, i, .gated{ - margin: 0px auto; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -500px; - } - - #sections > div:last-child{ - padding-bottom: 0px; - margin-bottom: 0px; - } - - - #sections h1{ - margin: 10px; - padding-top: 0px !important; - } - - #sections h3{ - margin-top: .5em; - } - - } - \ No newline at end of file diff --git a/spaces/merve/fill-in-the-blank/public/anonymization/make-students.js b/spaces/merve/fill-in-the-blank/public/anonymization/make-students.js deleted file mode 100644 index 4406024eb9e398a4eaedc2b725eaf4a56e625e16..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/anonymization/make-students.js +++ /dev/null @@ -1,184 +0,0 @@ -window.makeStudents = function(){ - var seed = new Math.seedrandom('12fbsab56') - var rand = d3.randomUniform.source(seed)(0, 1) - - var ncols = 12 - - var allStudents = d3.range(756).map(i => { - var age = ages[Math.floor(rand()*ages.length)] - var state = states[Math.floor(rand()*states.length)] - var season = Math.floor(rand()*4) - var heads = rand() < .5 - - if (rand() < .1) state = 'NY' - if (rand() < .5 && state == 'RI') state = states[Math.floor(rand()*states.length)] - if (rand() < .5 && state == 'CT') state = states[Math.floor(rand()*states.length)] - - var coinVals = d3.range(300).map(rand).slice(0, 200) - - return {age, state, i, pos: {}, group: {}, season, heads, coinVals, isAdditionalStudent: true} - }) - - var students = allStudents.slice(0, 144) - students.forEach(student => student.isAdditionalStudent = false) - - students.all = allStudents - students.all.forEach((d, i) => { - var x = (i % 25)/25*c.width - var y = ~~(i/25)/25*c.width - d.pos.all = [x, y] - }) - - var {bw, ageScale, stateScale} = axii - _.sortBy(students, d => -d.age).forEach((d, i) => { - var x = (i % ncols)/(ncols - 1)*c.width - var y = ~~(i/ncols)/(ncols - 1)*c.width - d.pos.grid = [x, y] - scale = .6 - d.pos.smallGrid = [x * scale + 90, y * scale] - }) - - // Set half the student to have plagerized. 
- var studentsPlagerizedArray = _.sortBy(d3.range(students.length).map(i => i % 2 == 0), () => rand()) - // var remainingPlagerizedArray = _.sortBy(d3.range(allStudents.length - students.length).map(i => i % 2 == 0), () => rand()) - remainingPlagerizedArray = d3.range(students.all.length).map(i => i % 2 == 1) - var plagerizedArray = studentsPlagerizedArray.concat(remainingPlagerizedArray) - students.all.forEach((d, i) => d.plagerized = plagerizedArray[i]) - - students.byAge = d3.nestBy(students, d => d.age) - students.byAge.forEach(age => { - age.forEach((d, i) => { - d.pos.age = [i*10, ageScale(d.age) + bw] - }) - }) - students.byAgeState = d3.nestBy(students, d => d.age + d.state) - students.byAgeState.forEach(group => { - var d0 = group.d0 = group[0] - group.pos = [bw + stateScale(d0.state), bw + ageScale(d0.age)] - - var angle = Math.PI*(3 - Math.sqrt(5))*(1 + Math.random()*.05 - .05/2) - group.forEach((d, i) => { - d.pos.ageState = addVec(phyllotaxis(i, 10.5, angle), group.pos) - d.group.ageState = group - }) - }) - - students.byAgeStateSeason = d3.nestBy(students, d => d.age + d.state + d.season) - students.byAgeStateSeason.forEach(group => { - var d0 = group.d0 = group[0] - group.pos = [bw + stateScale(d0.state), bw*d0.season/2 + ageScale(d0.age)] - - group.forEach((d, i) => { - d.pos.ageStateSeason = addVec([i*11 - group.length*11/2 + 6, 12], group.pos) - d.group.ageStateSeason = group - }) - }) - - - students.updateHeadsPos = function(){ - students.byHeads = d3.nestBy(students, d => d.coinVals[estimates.active.index] < sliders.headsProb) - students.byHeads.forEach(group => { - group.pos = [group.key == 'true' ? c.width/4 -15 : c.width/4*3 +15, c.height/2] - - group.forEach((d, i) => { - d.pos.heads = addVec(phyllotaxis(i, 12), group.pos) - d.group.heads = group - }) - }) - } - - students.plagerizedGroup = d3.nestBy(_.sortBy(students.all, d => d.plagerized), d => d.plagerized) - students.plagerizedGroup.forEach((group, groupIndex) => { - var d0 = group.d0 = group[0] - var offset = -20 - group.pos = [(d0.plagerized ? c.width/2 + offset : c.width/2 - offset), c.height/2 - 80] - - - var getOrderedPositions = function() { - positions = [] - - var step = 25 - var top = 0 - var bottom = 0 - var right = 0 - - var addAbove = function(dirPositive=true) { - var y = (top + 1) * step - var x = 0 - while (x <= right * step) { - positions.push([dirPositive ? x: (right * step - x), y]) - x += step - } - top++ - } - - var addRight = function(dirPositive=true) { - var x = (right + 1) * step - var y = bottom * step - while (y <= top * step) { - positions.push([x, dirPositive ? y: -y]) - y += step - } - right++ - } - - var addBelow = function(dirPositive=true) { - var y = (bottom - 1) * step - var x = 0 - while (x <= right * step) { - positions.push([dirPositive ? 
x: (right * step - x), y]) - x += step - } - bottom-- - } - - var addForward = function() { - addAbove(true) - addRight(false) - addBelow(false) - } - - var addBackward = function() { - addBelow(true) - addRight(true) - addAbove(false) - } - - isForward = true - while(positions.length < students.all.length) { - if (positions.length === 0) { - positions.push([0, 0]) - addRight() - addBelow() - } else { - if (isForward) { - addForward() - } else { - addBackward() - } - isForward = !isForward - } - } - return positions - } - - var populationPositions = getOrderedPositions() - var reversePositions = populationPositions.map(pos => [-pos[0], pos[1]]) - - group.forEach((d, i) => { - var x = (i % 7)/20*c.width - var y = ~~(i/7)/20*c.width - // d.pos.plagerized = addVec([x, y], group.pos) - d.pos.plagerizedShifted = addVec([x, y - 50], group.pos) - d.group.plagerized = group - - d.pos.plagerizedShifted = addVec((groupIndex === 0) ? populationPositions[i]: reversePositions[i], group.pos) - }) - }) - - - students.rand = rand - return students -} - -if (window.init) window.init() diff --git a/spaces/merve/fill-in-the-blank/public/measuring-fairness/index.html b/spaces/merve/fill-in-the-blank/public/measuring-fairness/index.html deleted file mode 100644 index 4260ecaa54d3d68181d664c9f4c4ddb13d215577..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/measuring-fairness/index.html +++ /dev/null @@ -1,298 +0,0 @@ - - - - - - - - - - - - - - - - - - Measuring Fairness - - - - - - - - - - - - - - - -
      - -
      - -

      Measuring Fairness

      -
      There are multiple ways to measure accuracy. No matter how we build our model, accuracy across these measures will vary when applied to different groups of people.
      - - - - - -
      -
      -
      - - -
      -

      Measuring Fairness

      - -

      How do you make sure a model works equally well for different groups of people? It turns out that in many situations, this is harder than you might think. - -

      The problem is that there are different ways to measure the accuracy of a model, and often it's mathematically impossible for them all to be equal across groups. - -

      We'll illustrate how this happens by creating a (fake) medical model to screen these people for a disease. -

      - - -
      -

      Ground Truth

      - -

      About half of these people actually have the disease a; half of them don't b. -

      - - -
      -

      Model Predictions

      - -

      In a perfect world, only sick people would test positive for the disease and only healthy people would test negative. -

      - - -
      -

      Model Mistakes

      - -

      But models and tests aren't perfect. - -

      The model might make a mistake and mark a sick person as healthy c. - -

      Or the opposite: marking a healthy person as sick f. -

      - - -

      Never Miss the Disease...

      - -

      If there's a simple follow-up test, we could have the model aggressively call close cases so it rarely misses the disease. - -

      We can quantify this by measuring the percentage of sick people a who test positive g. - -

      -
      - - -
      -

      ...Or Avoid Overcalling?

      - -

      On the other hand, if there isn't a secondary test, or the treatment uses a drug with a limited supply, we might care more about the percentage of people with positive tests who are actually sick g. - -

      - -

      These issues and trade-offs in model optimization aren't new, but they're brought into focus when we have the ability to fine-tune exactly how aggressively disease is diagnosed. - -
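The two measures just described, the share of sick people who test positive (recall) and the share of positive tests that are actually sick (precision), pull in opposite directions as the model gets more aggressive. Here is a minimal sketch of that trade-off; the risk scores and labels are made up for illustration and are not the explorable's data.

```python
# Sketch: recall and precision as a function of the decision threshold.
# Scores and labels are illustrative assumptions, not the explorable's data.

def recall_precision(scores, labels, threshold):
    """Recall = share of sick people flagged; precision = share of flags that are sick."""
    tp = sum(1 for s, y in zip(scores, labels) if s >= threshold and y == 1)
    fp = sum(1 for s, y in zip(scores, labels) if s >= threshold and y == 0)
    fn = sum(1 for s, y in zip(scores, labels) if s < threshold and y == 1)
    recall = tp / (tp + fn) if tp + fn else 0.0
    precision = tp / (tp + fp) if tp + fp else 0.0
    return recall, precision

# Hypothetical risk scores (0-1) and ground-truth labels (1 = sick, 0 = well).
scores = [0.9, 0.8, 0.75, 0.6, 0.55, 0.4, 0.35, 0.3, 0.2, 0.1]
labels = [1,   1,   0,    1,   0,    1,   0,    0,   0,   0]

for threshold in (0.3, 0.5, 0.7):  # lower threshold = more aggressive model
    r, p = recall_precision(scores, labels, threshold)
    print(f"threshold={threshold:.1f}  recall={r:.2f}  precision={p:.2f}")
```

With these made-up numbers, the most aggressive setting catches every sick person but only half of its positive calls are correct, while the strictest setting misses half of the sick people in exchange for fewer false alarms.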

      - - Try adjusting how aggressive the model is in diagnosing the disease -
      - - -
      -

      Subgroup Analysis

      - -

      Things get even more complicated when we check if the model treats different groups fairly.¹ - -

      Whatever we decide on in terms of trade-offs between these metrics, we'd probably like them to be roughly even across different groups of people. - -

      If we're trying to evenly allocate resources, having the model miss more cases in children than adults would be bad! ² -

      - - -
      -

      Base Rates

      - -

      If you look carefully, you'll see that the disease is more prevalent in children. That is, the "base rate" of the disease is different across groups. - -

      The fact that the base rates are different makes the situation surprisingly tricky. For one thing, even though the test catches the same percentage of sick adults and sick children, an adult who tests positive is less likely to have the disease than a child who tests positive. -
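A short Bayes-rule sketch makes this concrete. The sensitivity, specificity, and base rates below are assumptions invented for illustration; only the direction of the effect matters.

```python
# Sketch: the same test gives a different chance of being sick given a positive
# result (positive predictive value) when the base rate differs between groups.
# Sensitivity, specificity, and prevalences are illustrative assumptions.

def ppv(sensitivity, specificity, prevalence):
    """P(sick | positive) via Bayes' rule."""
    true_pos = sensitivity * prevalence
    false_pos = (1 - specificity) * (1 - prevalence)
    return true_pos / (true_pos + false_pos)

sensitivity = 0.90  # share of sick people the test catches (same for both groups)
specificity = 0.80  # share of well people correctly cleared (same for both groups)

for group, prevalence in [("children", 0.60), ("adults", 0.30)]:
    print(f"{group}: base rate={prevalence:.0%}, "
          f"P(sick | positive)={ppv(sensitivity, specificity, prevalence):.0%}")
```

With these assumed rates, a child who tests positive is sick about 87% of the time, but an adult who tests positive only about 66% of the time, even though the test itself behaves identically for both groups.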

      - - -
      -

      Imbalanced Metrics

      - -

      Why is there a disparity in diagnosing between children and adults? There is a higher proportion of well adults, so mistakes in the test will cause more well adults to be marked "positive" than well children (and similarly with mistaken negatives). - -
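In plain counts (again with invented numbers, not the explorable's data), the same error rates applied to groups with different compositions produce different numbers of mistakes.

```python
# Counting sketch: identical error rates, different group composition.
# Population size and error rates are illustrative assumptions.

population = 100           # people per group
false_positive_rate = 0.2  # share of well people wrongly marked "positive"
false_negative_rate = 0.1  # share of sick people wrongly marked "negative"

for group, sick_share in [("children", 0.6), ("adults", 0.3)]:
    sick = population * sick_share
    well = population - sick
    print(f"{group}: {well * false_positive_rate:.0f} well people marked positive, "
          f"{sick * false_negative_rate:.0f} sick people marked negative")
```

Because there are more well adults than well children, the adults' pool of positive tests contains more mistakes, which drags their positive predictive value down.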


      -
      - -

      To fix this, we could have the model take age into account. - -

      -
      -
      - -
      -

      Try adjusting the slider to make the model grade adults less aggressively than children.
      - -
      -

      This allows us to align one metric. But now adults who have the disease are less likely to be diagnosed with it! - -
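A sketch of what just happened, using a made-up score model rather than the explorable's actual one: give each group its own aggressiveness setting, tune the adult setting down until precision roughly matches, and watch recall separate.

```python
# Made-up score model: at aggressiveness t the test catches a share t of sick
# people (recall) and wrongly flags a share t**3 of well people. Illustrative only.

def metrics(t, prevalence):
    recall = t
    fpr = t ** 3
    precision = recall * prevalence / (recall * prevalence + fpr * (1 - prevalence))
    return recall, precision

groups = [("children", 0.60), ("adults", 0.30)]

print("same setting for both groups (t=0.90):")
for name, prevalence in groups:
    r, p = metrics(0.90, prevalence)
    print(f"  {name}: recall={r:.2f}  precision={p:.2f}")

print("per-group settings chosen so precision roughly matches:")
for (name, prevalence), t in zip(groups, (0.90, 0.48)):
    r, p = metrics(t, prevalence)
    print(f"  {name} (t={t:.2f}): recall={r:.2f}  precision={p:.2f}")
```

With these invented numbers, precision lines up at about 0.65 for both groups, but adult recall falls from 0.90 to 0.48, which is exactly the kind of misalignment described above.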

      -
      -
      - -

      No matter how you move the sliders, you won't be able to make both metrics fair at once. It turns out this is inevitable any time the base rates are different, and the test isn't perfect. - -

      There are multiple ways to define fairness mathematically. It usually isn't possible to satisfy all of them.³ -

      -
      - - -
      -
      -
      - - -

      Conclusion

      - -

      Thankfully, the notion of fairness you choose to satisfy will depend on the context of your model: even if you can't satisfy every definition of fairness at once, you can focus on the ones that make sense for your use case. - -

      Even if fairness along every dimension isn't possible, we shouldn't stop checking for bias. The Hidden Bias explorable outlines different ways human bias can feed into an ML model. - -

      More Reading

      - -

      In some contexts, setting different thresholds for different populations might not be acceptable. Can you make AI fairer than a judge? explores an algorithm that can send people to jail. - -

      There are lots of different metrics you might use to determine if an algorithm is fair. Attacking discrimination with smarter machine learning shows how several of them work. Using Fairness Indicators in conjunction with the What-If Tool and other fairness tools, you can test your own model against commonly used fairness metrics. - -

      Machine learning practitioners use words like “recall” to describe the percentage of sick people who test positive. Check out the PAIR Guidebook Glossary to learn how to talk to the people building the models. - -

      Appendix

      - -

      ¹ This essay uses very academic, mathematical standards for fairness that don't encompass everything we might include in the colloquial meaning of fairness. There's a gap between the technical descriptions of algorithms here and the social context that they're deployed in. - -

      ² Sometimes we might care more about different error modes in different populations. If treatment is riskier for children, we'd probably want the model to be less aggressive in diagnosing. - -

      ³ The above example assumes the model sorts and scores people based on how likely it is that they are sick. With complete control over the model's exact rate of under- and over-diagnosing in both groups, it's actually possible to align both of the metrics we've discussed so far. Try tweaking the model below to get both of them to line up. - -

      Adding a third metric, the percentage of well people a who test negative e, makes perfect fairness impossible. Can you see why all three metrics won't align unless the base rate of the disease is the same in both populations? - -
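One way to see why, as a hedged sketch with made-up sensitivity, precision target, and base rates: if both groups are forced to share the same recall and the same precision, Bayes' rule pins down the false-positive rate each group would need, and for different base rates those two rates can only coincide when both are zero.

```python
# From precision = sens*p / (sens*p + fpr*(1-p)), solve for the false-positive
# rate (fpr) each group needs to reach the same precision. Numbers are illustrative.

def required_fpr(sensitivity, prevalence, target_precision):
    return (sensitivity * prevalence * (1 - target_precision)
            / (target_precision * (1 - prevalence)))

sensitivity = 0.90       # same recall imposed on both groups
target_precision = 0.85  # same precision imposed on both groups

for group, prevalence in [("children", 0.60), ("adults", 0.30)]:
    fpr = required_fpr(sensitivity, prevalence, target_precision)
    print(f"{group}: base rate={prevalence:.0%}, required false-positive rate={fpr:.1%}")

# When the base rates differ, the two required rates only agree if both are zero,
# meaning the test never flags a well person (or flags no one at all). So recall,
# precision, and the share of well people who test negative can't all match.
```

Matching the third metric, the share of well people who test negative, is the same as matching the false-positive rate, which is why all three can only line up when the base rates are equal or the test is perfect.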

      - -
      Drag ⁠— to adjust model accuracy and ⁠| to adjust the occurrence of disease
      -
      - -

      Credits

      - -

      Adam Pearce // May 2020 - -

      Thanks to Carey Radebaugh, Dan Nanas, David Weinberger, Emily Denton, Emily Reif, Fernanda Viégas, Hal Abelson, James Wexler, Kristen Olson, Lucas Dixon, Mahima Pushkarna, Martin Wattenberg, Michael Terry, Rebecca Salois, Timnit Gebru, Tulsee Doshi, Yannick Assogba, Yoni Halpern, Zan Armstrong, and my other colleagues at Google for their help with this piece. - -

      Silhouettes from ProPublica's Wee People. - -

      More Explorables

      - -

      - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/measuring-fairness/public/uncertainty-calibration/style.css b/spaces/merve/measuring-fairness/public/uncertainty-calibration/style.css deleted file mode 100644 index 8073cf0a59eac0be0e293b35af5255c40c063e21..0000000000000000000000000000000000000000 --- a/spaces/merve/measuring-fairness/public/uncertainty-calibration/style.css +++ /dev/null @@ -1,89 +0,0 @@ -svg{ - overflow: visible; -} - -text{ - fill: #202124; - user-select: none; -} - -.domain{ - display: none; -} - -.thresholds, .threshold > g{ - cursor: pointer; -} - -svg{ - user-select: none; -} - -text.axis-label .legend-text{ - font-family: 'Roboto'; - font-style: normal; - font-size: 16px; - line-height: 20px; - /* identical to box height, or 125% */ - - fill: #000; -} - -.axis text{ - font-size: 10px; -} - -text{ - text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff; -} - - - - -.bucket text{ - /*text-shadow: 0 1px 0 #000, 1px 0 0 #000, 0 -1px 0 #000, -1px 0 0 #000;*/ - /*fill: #fff;*/ - font-size: 11px; -} - - -.big-text{ - font-variant-numeric: tabular-nums; - font-size: 16px; -} - -#card{ - display: flex; - flex-direction: column; - align-items: flex-start; - padding: 24px 24px; - gap: 6px; - - background: #EDF4EC; - border: 1px solid #34A853; - box-sizing: border-box; - border-radius: 4px; -} - -text.val-text{ - background: #DFE9E1; - border: 1px solid #476C63; - box-sizing: border-box; - border-radius: 4px; - fill: #2A4C4A; - text-shadow: none; -} - -.val-box{ - fill: #DFE9E1; - stroke: #476C63; - opacity: 1; -} - -.legend-title{ - fill: #002622; -} - -h3 { - color: #00695C; -} \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/public/third_party/topojson-client.js b/spaces/merve/uncertainty-calibration/public/third_party/topojson-client.js deleted file mode 100644 index 728070f185d11aa72b3f78ab88037275614fe89b..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/third_party/topojson-client.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://github.com/topojson/topojson-client v3.0.1 Copyright 2019 Mike Bostock -!function(e,r){"object"==typeof exports&&"undefined"!=typeof module?r(exports):"function"==typeof define&&define.amd?define(["exports"],r):r((e=e||self).topojson=e.topojson||{})}(this,function(e){"use strict";function r(e){return e}function t(e){if(null==e)return r;var t,n,o=e.scale[0],a=e.scale[1],i=e.translate[0],c=e.translate[1];return function(e,r){r||(t=n=0);var u=2,f=e.length,s=new Array(f);for(s[0]=(t+=e[0])*o+i,s[1]=(n+=e[1])*a+c;ui&&(i=e[0]),e[1]c&&(c=e[1])}function f(e){switch(e.type){case"GeometryCollection":e.geometries.forEach(f);break;case"Point":u(e.coordinates);break;case"MultiPoint":e.coordinates.forEach(u)}}for(r in e.arcs.forEach(function(e){for(var r,t=-1,u=e.length;++ti&&(i=r[0]),r[1]c&&(c=r[1])}),e.objects)f(e.objects[r]);return[o,a,i,c]}function o(e,r){var t=r.id,n=r.bbox,o=null==r.properties?{}:r.properties,i=a(e,r);return null==t&&null==n?{type:"Feature",properties:o,geometry:i}:null==n?{type:"Feature",id:t,properties:o,geometry:i}:{type:"Feature",id:t,bbox:n,properties:o,geometry:i}}function a(e,r){var n=t(e.transform),o=e.arcs;function a(e,r){r.length&&r.pop();for(var t=o[e<0?~e:e],a=0,i=t.length;a1)n=function(e,r,t){var n,o=[],a=[];function i(e){var r=e<0?~e:e;(a[r]||(a[r]=[])).push({i:e,g:n})}function c(e){e.forEach(i)}function u(e){e.forEach(c)}return function 
e(r){switch(n=r,r.type){case"GeometryCollection":r.geometries.forEach(e);break;case"LineString":c(r.arcs);break;case"MultiLineString":case"Polygon":u(r.arcs);break;case"MultiPolygon":!function(e){e.forEach(u)}(r.arcs)}}(r),a.forEach(null==t?function(e){o.push(e[0].i)}:function(e){t(e[0].g,e[e.length-1].g)&&o.push(e[0].i)}),o}(0,r,t);else for(o=0,n=new Array(a=e.arcs.length);o1)for(var a,c,f=1,s=u(o[0]);fs&&(c=o[0],o[0]=o[f],o[f]=c,s=a);return o}).filter(function(e){return e.length>0})}}function f(e,r){for(var t=0,n=e.length;t>>1;e[o]=2))throw new Error("n must be ≥2");var t,o=(u=e.bbox||n(e))[0],a=u[1],i=u[2],c=u[3];r={scale:[i-o?(i-o)/(t-1):1,c-a?(c-a)/(t-1):1],translate:[o,a]}}var u,f,l=s(r),h=e.objects,p={};function g(e){return l(e)}function y(e){var r;switch(e.type){case"GeometryCollection":r={type:"GeometryCollection",geometries:e.geometries.map(y)};break;case"Point":r={type:"Point",coordinates:g(e.coordinates)};break;case"MultiPoint":r={type:"MultiPoint",coordinates:e.coordinates.map(g)};break;default:return e}return null!=e.id&&(r.id=e.id),null!=e.bbox&&(r.bbox=e.bbox),null!=e.properties&&(r.properties=e.properties),r}for(f in h)p[f]=y(h[f]);return{type:"Topology",bbox:u,transform:r,objects:p,arcs:e.arcs.map(function(e){var r,t=0,n=1,o=e.length,a=new Array(o);for(a[0]=l(e[0],0);++t= 1 - - # Finalize filter kernel. - f = np.array(f, dtype=np.float32) - if f.ndim == 1: - f = f[:, np.newaxis] * f[np.newaxis, :] - assert f.ndim == 2 - if normalize: - f /= np.sum(f) - if flip: - f = f[::-1, ::-1] - f = f[:, :, np.newaxis, np.newaxis] - f = np.tile(f, [1, 1, int(x.shape[1]), 1]) - - # No-op => early exit. - if f.shape == (1, 1) and f[0,0] == 1: - return x - - # Convolve using depthwise_conv2d. - orig_dtype = x.dtype - x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16 - f = tf.constant(f, dtype=x.dtype, name='filter') - strides = [1, 1, stride, stride] - x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW') - x = tf.cast(x, orig_dtype) - return x - -def _upscale2d(x, factor=2, gain=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(factor, int) and factor >= 1 - - # Apply gain. - if gain != 1: - x *= gain - - # No-op => early exit. - if factor == 1: - return x - - # Upscale using tf.tile(). - s = x.shape - x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) - x = tf.tile(x, [1, 1, 1, factor, 1, factor]) - x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) - return x - -def _downscale2d(x, factor=2, gain=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(factor, int) and factor >= 1 - - # 2x2, float32 => downscale using _blur2d(). - if factor == 2 and x.dtype == tf.float32: - f = [np.sqrt(gain) / factor] * factor - return _blur2d(x, f=f, normalize=False, stride=factor) - - # Apply gain. - if gain != 1: - x *= gain - - # No-op => early exit. - if factor == 1: - return x - - # Large factor => downscale using tf.nn.avg_pool(). - # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work. - ksize = [1, 1, factor, factor] - return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') - -#---------------------------------------------------------------------------- -# High-level ops for manipulating 4D activation tensors. -# The gradients of these are meant to be as efficient as possible. 
- -def blur2d(x, f=[1,2,1], normalize=True): - with tf.variable_scope('Blur2D'): - @tf.custom_gradient - def func(x): - y = _blur2d(x, f, normalize) - @tf.custom_gradient - def grad(dy): - dx = _blur2d(dy, f, normalize, flip=True) - return dx, lambda ddx: _blur2d(ddx, f, normalize) - return y, grad - return func(x) - -def upscale2d(x, factor=2): - with tf.variable_scope('Upscale2D'): - @tf.custom_gradient - def func(x): - y = _upscale2d(x, factor) - @tf.custom_gradient - def grad(dy): - dx = _downscale2d(dy, factor, gain=factor**2) - return dx, lambda ddx: _upscale2d(ddx, factor) - return y, grad - return func(x) - -def downscale2d(x, factor=2): - with tf.variable_scope('Downscale2D'): - @tf.custom_gradient - def func(x): - y = _downscale2d(x, factor) - @tf.custom_gradient - def grad(dy): - dx = _upscale2d(dy, factor, gain=1/factor**2) - return dx, lambda ddx: _downscale2d(ddx, factor) - return y, grad - return func(x) - -#---------------------------------------------------------------------------- -# Get/create weight tensor for a convolutional or fully-connected layer. - -def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1): - fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] - he_std = gain / np.sqrt(fan_in) # He init - - # Equalized learning rate and custom learning rate multiplier. - if use_wscale: - init_std = 1.0 / lrmul - runtime_coef = he_std * lrmul - else: - init_std = he_std / lrmul - runtime_coef = lrmul - - # Create variable. - init = tf.initializers.random_normal(0, init_std) - return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef - -#---------------------------------------------------------------------------- -# Fully-connected layer. - -def dense(x, fmaps, **kwargs): - if len(x.shape) > 2: - x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) - w = get_weight([x.shape[1].value, fmaps], **kwargs) - w = tf.cast(w, x.dtype) - return tf.matmul(x, w) - -#---------------------------------------------------------------------------- -# Convolutional layer. - -def conv2d(x, fmaps, kernel, **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.cast(w, x.dtype) - return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') - -#---------------------------------------------------------------------------- -# Fused convolution + scaling. -# Faster and uses less memory than performing the operations separately. - -def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - assert fused_scale in [True, False, 'auto'] - if fused_scale == 'auto': - fused_scale = min(x.shape[2:]) * 2 >= 128 - - # Not fused => call the individual ops directly. - if not fused_scale: - return conv2d(upscale2d(x), fmaps, kernel, **kwargs) - - # Fused => perform both ops simultaneously using tf.nn.conv2d_transpose(). 
- w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] - w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') - w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) - w = tf.cast(w, x.dtype) - os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] - return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') - -def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - assert fused_scale in [True, False, 'auto'] - if fused_scale == 'auto': - fused_scale = min(x.shape[2:]) >= 128 - - # Not fused => call the individual ops directly. - if not fused_scale: - return downscale2d(conv2d(x, fmaps, kernel, **kwargs)) - - # Fused => perform both ops simultaneously using tf.nn.conv2d(). - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') - w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 - w = tf.cast(w, x.dtype) - return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') - -#---------------------------------------------------------------------------- -# Apply bias to the given activation tensor. - -def apply_bias(x, lrmul=1): - b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul - b = tf.cast(b, x.dtype) - if len(x.shape) == 2: - return x + b - return x + tf.reshape(b, [1, -1, 1, 1]) - -#---------------------------------------------------------------------------- -# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16. - -def leaky_relu(x, alpha=0.2): - with tf.variable_scope('LeakyReLU'): - alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') - @tf.custom_gradient - def func(x): - y = tf.maximum(x, x * alpha) - @tf.custom_gradient - def grad(dy): - dx = tf.where(y >= 0, dy, dy * alpha) - return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha) - return y, grad - return func(x) - -#---------------------------------------------------------------------------- -# Pixelwise feature vector normalization. - -def pixel_norm(x, epsilon=1e-8): - with tf.variable_scope('PixelNorm'): - epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') - return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) - -#---------------------------------------------------------------------------- -# Instance normalization. - -def instance_norm(x, epsilon=1e-8): - assert len(x.shape) == 4 # NCHW - with tf.variable_scope('InstanceNorm'): - orig_dtype = x.dtype - x = tf.cast(x, tf.float32) - x -= tf.reduce_mean(x, axis=[2,3], keepdims=True) - epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') - x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon) - x = tf.cast(x, orig_dtype) - return x - -#---------------------------------------------------------------------------- -# Style modulation. - -def style_mod(x, dlatent, **kwargs): - with tf.variable_scope('StyleMod'): - style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs)) - style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2)) - return x * (style[:,0] + 1) + style[:,1] - -#---------------------------------------------------------------------------- -# Noise input. 
- -def apply_noise(x, noise_var=None, randomize_noise=True): - assert len(x.shape) == 4 # NCHW - with tf.variable_scope('Noise'): - if noise_var is None or randomize_noise: - noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype) - else: - noise = tf.cast(noise_var, x.dtype) - weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros()) - return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1]) - -#---------------------------------------------------------------------------- -# Minibatch standard deviation. - -def minibatch_stddev_layer(x, group_size=4, num_new_features=1): - with tf.variable_scope('MinibatchStddev'): - group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. - s = x.shape # [NCHW] Input shape. - y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. - y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. - y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. - y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. - y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. - y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. - y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups - y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. - y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. - return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. - -#---------------------------------------------------------------------------- -# Style-based generator used in the StyleGAN paper. -# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below. - -def G_style( - latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. - labels_in, # Second input: Conditioning labels [minibatch, label_size]. - truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable. - truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable. - truncation_psi_val = None, # Value for truncation_psi to use during validation. - truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation. - dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable. - style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable. - is_training = False, # Network is under training? Enables and disables specific features. - is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls. - **kwargs): # Arguments for sub-networks (G_mapping and G_synthesis). - - # Validate arguments. 
- assert not is_training or not is_validation - assert isinstance(components, dnnlib.EasyDict) - if is_validation: - truncation_psi = truncation_psi_val - truncation_cutoff = truncation_cutoff_val - if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1): - truncation_psi = None - if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0): - truncation_cutoff = None - if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1): - dlatent_avg_beta = None - if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0): - style_mixing_prob = None - - # Setup components. - if 'synthesis' not in components: - components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs) - num_layers = components.synthesis.input_shape[1] - dlatent_size = components.synthesis.input_shape[2] - if 'mapping' not in components: - components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs) - - # Setup variables. - lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False) - dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False) - - # Evaluate mapping network. - dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs) - - # Update moving average of W. - if dlatent_avg_beta is not None: - with tf.variable_scope('DlatentAvg'): - batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0) - update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta)) - with tf.control_dependencies([update_op]): - dlatents = tf.identity(dlatents) - - # Perform style mixing regularization. - if style_mixing_prob is not None: - with tf.name_scope('StyleMix'): - latents2 = tf.random_normal(tf.shape(latents_in)) - dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs) - layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] - cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2 - mixing_cutoff = tf.cond( - tf.random_uniform([], 0.0, 1.0) < style_mixing_prob, - lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32), - lambda: cur_layers) - dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2) - - # Apply truncation trick. - if truncation_psi is not None and truncation_cutoff is not None: - with tf.variable_scope('Truncation'): - layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] - ones = np.ones(layer_idx.shape, dtype=np.float32) - coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones) - dlatents = tflib.lerp(dlatent_avg, dlatents, coefs) - - # Evaluate synthesis network. - with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]): - images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs) - return tf.identity(images_out, name='images_out') - -#---------------------------------------------------------------------------- -# Mapping network used in the StyleGAN paper. - -def G_mapping( - latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. - labels_in, # Second input: Conditioning labels [minibatch, label_size]. - latent_size = 512, # Latent vector (Z) dimensionality. 
- label_size = 0, # Label dimensionality, 0 if no labels. - dlatent_size = 512, # Disentangled latent (W) dimensionality. - dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size]. - mapping_layers = 8, # Number of mapping layers. - mapping_fmaps = 512, # Number of activations in the mapping layers. - mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers. - mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'. - use_wscale = True, # Enable equalized learning rate? - normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers? - dtype = 'float32', # Data type to use for activations and outputs. - **_kwargs): # Ignore unrecognized keyword args. - - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity] - - # Inputs. - latents_in.set_shape([None, latent_size]) - labels_in.set_shape([None, label_size]) - latents_in = tf.cast(latents_in, dtype) - labels_in = tf.cast(labels_in, dtype) - x = latents_in - - # Embed labels and concatenate them with latents. - if label_size: - with tf.variable_scope('LabelConcat'): - w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal()) - y = tf.matmul(labels_in, tf.cast(w, dtype)) - x = tf.concat([x, y], axis=1) - - # Normalize latents. - if normalize_latents: - x = pixel_norm(x) - - # Mapping layers. - for layer_idx in range(mapping_layers): - with tf.variable_scope('Dense%d' % layer_idx): - fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps - x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul) - x = apply_bias(x, lrmul=mapping_lrmul) - x = act(x) - - # Broadcast. - if dlatent_broadcast is not None: - with tf.variable_scope('Broadcast'): - x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1]) - - # Output. - assert x.dtype == tf.as_dtype(dtype) - return tf.identity(x, name='dlatents_out') - -#---------------------------------------------------------------------------- -# Synthesis network used in the StyleGAN paper. - -def G_synthesis( - dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size]. - dlatent_size = 512, # Disentangled latent (W) dimensionality. - num_channels = 3, # Number of output color channels. - resolution = 1024, # Output resolution. - fmap_base = 8192, # Overall multiplier for the number of feature maps. - fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. - fmap_max = 512, # Maximum number of feature maps in any layer. - use_styles = True, # Enable style inputs? - const_input_layer = True, # First layer is a learned constant? - use_noise = True, # Enable noise inputs? - randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables. - nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu' - use_wscale = True, # Enable equalized learning rate? - use_pixel_norm = False, # Enable pixelwise feature vector normalization? - use_instance_norm = True, # Enable instance normalization? - dtype = 'float32', # Data type to use for activations and outputs. - fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. - blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. 
- structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior. - **_kwargs): # Ignore unrecognized keyword args. - - resolution_log2 = int(np.log2(resolution)) - assert resolution == 2**resolution_log2 and resolution >= 4 - def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) - def blur(x): return blur2d(x, blur_filter) if blur_filter else x - if is_template_graph: force_clean_graph = True - if force_clean_graph: randomize_noise = False - if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive' - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] - num_layers = resolution_log2 * 2 - 2 - num_styles = num_layers if use_styles else 1 - images_out = None - - # Primary inputs. - dlatents_in.set_shape([None, num_styles, dlatent_size]) - dlatents_in = tf.cast(dlatents_in, dtype) - lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype) - - # Noise inputs. - noise_inputs = [] - if use_noise: - for layer_idx in range(num_layers): - res = layer_idx // 2 + 2 - shape = [1, use_noise, 2**res, 2**res] - noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False)) - - # Things to do at the end of each layer. - def layer_epilogue(x, layer_idx): - if use_noise: - x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise) - x = apply_bias(x) - x = act(x) - if use_pixel_norm: - x = pixel_norm(x) - if use_instance_norm: - x = instance_norm(x) - if use_styles: - x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale) - return x - - # Early layers. - with tf.variable_scope('4x4'): - if const_input_layer: - with tf.variable_scope('Const'): - x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones()) - x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0) - else: - with tf.variable_scope('Dense'): - x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressing GAN - x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0) - with tf.variable_scope('Conv'): - x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1) - - # Building blocks for remaining layers. - def block(res, x): # res = 3..resolution_log2 - with tf.variable_scope('%dx%d' % (2**res, 2**res)): - with tf.variable_scope('Conv0_up'): - x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4) - with tf.variable_scope('Conv1'): - x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3) - return x - def torgb(res, x): # res = 2..resolution_log2 - lod = resolution_log2 - res - with tf.variable_scope('ToRGB_lod%d' % lod): - return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) - - # Fixed structure: simple and efficient, but does not support progressive growing. 
- if structure == 'fixed': - for res in range(3, resolution_log2 + 1): - x = block(res, x) - images_out = torgb(resolution_log2, x) - - # Linear structure: simple but inefficient. - if structure == 'linear': - images_out = torgb(2, x) - for res in range(3, resolution_log2 + 1): - lod = resolution_log2 - res - x = block(res, x) - img = torgb(res, x) - images_out = upscale2d(images_out) - with tf.variable_scope('Grow_lod%d' % lod): - images_out = tflib.lerp_clip(img, images_out, lod_in - lod) - - # Recursive structure: complex but efficient. - if structure == 'recursive': - def cset(cur_lambda, new_cond, new_lambda): - return lambda: tf.cond(new_cond, new_lambda, cur_lambda) - def grow(x, res, lod): - y = block(res, x) - img = lambda: upscale2d(torgb(res, y), 2**lod) - img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod)) - if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) - return img() - images_out = grow(x, 3, resolution_log2 - 3) - - assert images_out.dtype == tf.as_dtype(dtype) - return tf.identity(images_out, name='images_out') - -#---------------------------------------------------------------------------- -# Discriminator used in the StyleGAN paper. - -def D_basic( - images_in, # First input: Images [minibatch, channel, height, width]. - labels_in, # Second input: Labels [minibatch, label_size]. - num_channels = 1, # Number of input color channels. Overridden based on dataset. - resolution = 32, # Input resolution. Overridden based on dataset. - label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. - fmap_base = 8192, # Overall multiplier for the number of feature maps. - fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. - fmap_max = 512, # Maximum number of feature maps in any layer. - nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', - use_wscale = True, # Enable equalized learning rate? - mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. - mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer. - dtype = 'float32', # Data type to use for activations and outputs. - fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. - blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. - structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - **_kwargs): # Ignore unrecognized keyword args. 
- - resolution_log2 = int(np.log2(resolution)) - assert resolution == 2**resolution_log2 and resolution >= 4 - def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) - def blur(x): return blur2d(x, blur_filter) if blur_filter else x - if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive' - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] - - images_in.set_shape([None, num_channels, resolution, resolution]) - labels_in.set_shape([None, label_size]) - images_in = tf.cast(images_in, dtype) - labels_in = tf.cast(labels_in, dtype) - lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) - scores_out = None - - # Building blocks. - def fromrgb(x, res): # res = 2..resolution_log2 - with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): - return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale))) - def block(x, res): # res = 2..resolution_log2 - with tf.variable_scope('%dx%d' % (2**res, 2**res)): - if res >= 3: # 8x8 and up - with tf.variable_scope('Conv0'): - x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Conv1_down'): - x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale))) - else: # 4x4 - if mbstd_group_size > 1: - x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features) - with tf.variable_scope('Conv'): - x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Dense0'): - x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Dense1'): - x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale)) - return x - - # Fixed structure: simple and efficient, but does not support progressive growing. - if structure == 'fixed': - x = fromrgb(images_in, resolution_log2) - for res in range(resolution_log2, 2, -1): - x = block(x, res) - scores_out = block(x, 2) - - # Linear structure: simple but inefficient. - if structure == 'linear': - img = images_in - x = fromrgb(img, resolution_log2) - for res in range(resolution_log2, 2, -1): - lod = resolution_log2 - res - x = block(x, res) - img = downscale2d(img) - y = fromrgb(img, res - 1) - with tf.variable_scope('Grow_lod%d' % lod): - x = tflib.lerp_clip(x, y, lod_in - lod) - scores_out = block(x, 2) - - # Recursive structure: complex but efficient. - if structure == 'recursive': - def cset(cur_lambda, new_cond, new_lambda): - return lambda: tf.cond(new_cond, new_lambda, cur_lambda) - def grow(res, lod): - x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) - if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) - x = block(x(), res); y = lambda: x - if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) - return y() - scores_out = grow(2, resolution_log2 - 2) - - # Label conditioning from "Which Training Methods for GANs do actually Converge?" 
- if label_size: - with tf.variable_scope('LabelSwitch'): - scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True) - - assert scores_out.dtype == tf.as_dtype(dtype) - scores_out = tf.identity(scores_out, name='scores_out') - return scores_out - -#---------------------------------------------------------------------------- diff --git a/spaces/mikeee/radiobee-dev/radiobee/gen_pset.py b/spaces/mikeee/radiobee-dev/radiobee/gen_pset.py deleted file mode 100644 index aac4eb9f77b45558f4b6e91cea341f6818204203..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-dev/radiobee/gen_pset.py +++ /dev/null @@ -1,184 +0,0 @@ -"""Gne pset from cmat. Find pairs for a given cmat. - -tinybee.find_pairs.py with fixed estimator='dbscan' eps=eps, min_samples=min_samples -""" -# pylint: disable=too-many-locals, unused-import, invalid-name - -from typing import List, Tuple, Union - -import numpy as np -import pandas as pd -from sklearn.cluster import DBSCAN -import logzero -from logzero import logger -from radiobee.cmat2tset import cmat2tset -from radiobee.interpolate_pset import interpolate_pset - - -def _gen_pset( - cmat1: Union[List[List[float]], np.ndarray, pd.DataFrame], - eps: float = 10, - min_samples: int = 6, - delta: float = 7, - verbose: Union[bool, int] = False, - # ) -> List[Tuple[int, int, Union[float, str]]]: -) -> List[Tuple[Union[float, str], Union[float, str], Union[float, str]]]: - """Gen pset from cmat. - - Find pairs for a given cmat. - - Args: - cmat: correlation/similarity matrix - eps: min epsilon for DBSCAN (10) - min_samples: minimum # of samples for DBSCAN (6) - delta: tolerance (7) - - Returns: - pairs + "" or metric (float) - - dbscan_pairs' setup - if eps is None: - eps = src_len * .01 - if eps < 3: - eps = 3 - if min_samples is None: - min_samples = tgt_len / 100 * 0.5 - if min_samples < 3: - min_samples = 3 - - def gen_eps_minsamples(src_len, tgt_len): - eps = src_len * .01 - if eps < 3: - eps = 3 - - min_samples = tgt_len / 100 * 0.5 - if min_samples < 3: - min_samples = 3 - return {"eps": eps, "min_samples": min_samples} - - """ - if isinstance(verbose, bool): - if verbose: - verbose = 10 - else: - verbose = 20 - logzero.loglevel(verbose) - - # if isinstance(cmat, list): - cmat = np.array(cmat1) - - src_len, tgt_len = cmat.shape - - # tset = cmat2tset(cmat) - tset = cmat2tset(cmat).tolist() - - logger.debug("tset: %s", tset) - - # iset = gen_iset(cmat, verbose=verbose, estimator=estimator) - labels = DBSCAN(eps=eps, min_samples=min_samples).fit(tset).labels_ - - df_tset = pd.DataFrame(tset, columns=["x", "y", "cos"]) - cset = df_tset[labels > -1].to_numpy() - - # sort cset - _ = sorted(cset.tolist(), key=lambda x: x[0]) - iset = interpolate_pset(_, tgt_len) - - # *_, ymax = zip(*tset) - # ymax = list(ymax) - # low_ = np.min(ymax) - 1 # reset to minimum_value - 1 - - buff = [(-1, -1, ""), (tgt_len, src_len, "")] - - # for idx, tset_elm in enumerate(tset): - for tset_elm in tset: - logger.debug("buff: %s", buff) - # postion max in ymax and insert in buff - # if with range given by iset+-delta and - # it's valid (do not exceed constraint - # by neighboring points - - # argmax = int(np.argmax(ymax)) - - # logger.debug("=== %s,%s === %s", _, argmax, tset[_]) - logger.debug("=== %s === %s", _, tset_elm) - - # ymax[_] = low_ - # elm = tset[argmax] - # elm0, *_ = elm - - elm0, *_ = tset_elm - - # position elm in buff - idx = -1 # for making pyright happy - for idx, loc in enumerate(buff): - if loc[0] > elm0: - break - else: - idx += 1 # last - - # 
insert elm in for valid elm - # (within range inside two neighboring points) - - # pos = int(tset[argmax][0]) - pos = int(tset_elm[0]) - logger.debug(" %s <=> %s ", tset_elm, iset[pos]) - - # if abs(tset[argmax][1] - iset[pos][1]) <= delta: - if abs(tset_elm[1] - iset[pos][1]) <= delta: - if tset_elm[1] > buff[idx - 1][1] and tset_elm[1] < buff[idx][1]: - buff.insert(idx, tset_elm) - logger.debug("idx: %s, tset_elm: %s", idx, tset_elm) - else: - logger.debug("\t***\t idx: %s, tset_elm: %s", idx, tset_elm) - _ = """ - if abs(tset[loc][1] - iset[loc][1]) <= delta: - if tset[loc][1] > buff[idx][1] and tset[loc][1] < buff[idx + 1][1]: - buff.insert(idx + 1, tset[loc]) - # """ - - # remove first and last entry in buff - buff.pop(0) - buff.pop() - - # return [(1, 1, "")] - return [(int(elm0), int(elm1), elm2) for elm0, elm1, elm2 in buff] - - -def gen_pset( - cmat1: Union[List[List[float]], np.ndarray, pd.DataFrame], - eps: float = 10, - min_samples: int = 6, - delta: float = 7, - verbose: Union[bool, int] = False, -) -> List[Tuple[Union[float, str], Union[float, str], Union[float, str]]]: - """Gen pset. - - Refer to _gen_pset. - """ - del verbose - gen_pset.min_samples = min_samples - for min_s in range(min_samples): - logger.debug(" min_samples, try %s", min_samples - min_s) - try: - pset = _gen_pset( - cmat1, - eps=eps, - min_samples=min_samples - min_s, - delta=delta, - ) - break - except ValueError: - logger.debug(" decrease min_samples by %s", min_s + 1) - continue - except Exception as e: - logger.error(e) - continue - else: - # break should happen above when min_samples = 2 - raise Exception("bummer, this shouldn't happen, probably another bug") - - # store new min_samples - gen_pset.min_samples = min_samples - min_s - - return pset diff --git a/spaces/mileslilly/City-classifier/app.py b/spaces/mileslilly/City-classifier/app.py deleted file mode 100644 index b135180b31516ef449d8874f53a72329dee8e346..0000000000000000000000000000000000000000 --- a/spaces/mileslilly/City-classifier/app.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -"""city_classifier_production - -Automatically generated by Colaboratory. - -Original file is located at - https://colab.research.google.com/drive/1L-z1dtcO8Co-TSZyGVYHNUAX3wvUSswD -""" -#hide -from fastbook import * -from fastai.vision.widgets import * -from fastai.vision.utils import * - - -learn = load_learner('model.pkl') -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - return {labels[i]: float(probs[i]) for i in range(len(labels))} - -import gradio as gr -gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(512, 512)), outputs=gr.outputs.Label(num_top_classes=3)).launch(share=False) - diff --git a/spaces/mingyuan/ReMoDiffuse/mogen/version.py b/spaces/mingyuan/ReMoDiffuse/mogen/version.py deleted file mode 100644 index 12cc21f96506948cb046d25424cf0a3a9a8ef40f..0000000000000000000000000000000000000000 --- a/spaces/mingyuan/ReMoDiffuse/mogen/version.py +++ /dev/null @@ -1,25 +0,0 @@ -__version__ = '0.0.1' - - -def parse_version_info(version_str): - """Parse a version string into a tuple. - Args: - version_str (str): The version string. - Returns: - tuple[int | str]: The version info, e.g., "1.3.0" is parsed into - (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
- """ - version_info = [] - for x in version_str.split('.'): - if x.isdigit(): - version_info.append(int(x)) - elif x.find('rc') != -1: - patch_version = x.split('rc') - version_info.append(int(patch_version[0])) - version_info.append(f'rc{patch_version[1]}') - return tuple(version_info) - - -version_info = parse_version_info(__version__) - -__all__ = ['__version__', 'version_info', 'parse_version_info'] \ No newline at end of file diff --git a/spaces/minoluusa/chatbot1/app.py b/spaces/minoluusa/chatbot1/app.py deleted file mode 100644 index 7345812c1199b971c56bdda3df698687ad0b0352..0000000000000000000000000000000000000000 --- a/spaces/minoluusa/chatbot1/app.py +++ /dev/null @@ -1,28 +0,0 @@ -import openai -import gradio as gr -import os - - -def CallChatGPT(question): - - openai.api_key = os.getenv("OPENAI_API_KEY") - #openai.api_key = 'sk-AAAcccccccccABC' - - response = openai.Completion.create( - model="text-davinci-003", - prompt=question, - temperature=1, - max_tokens=256, - top_p=1, - frequency_penalty=0, - presence_penalty=0 - ) - return response['choices'][0].text - - -#q = input('AI:') -#ans = CallChatGPT(q) -#print(ans) - -gui = gr.Interface(fn=CallChatGPT, inputs='text', outputs='text') -gui.launch(share=False) diff --git a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/data/datasets/register_coco_stuff.py b/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/data/datasets/register_coco_stuff.py deleted file mode 100644 index d1a0f5b571a971fe20ebc8932d27499de856a565..0000000000000000000000000000000000000000 --- a/spaces/mmlab-ntu/Segment-Any-RGBD/open_vocab_seg/data/datasets/register_coco_stuff.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import os - -from detectron2.data import DatasetCatalog, MetadataCatalog -from detectron2.data.datasets import load_sem_seg - - -COCO_CATEGORIES = [ - {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"}, - {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"}, - {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"}, - {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"}, - {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"}, - {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"}, - {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"}, - {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"}, - {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"}, - {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"}, - {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"}, - {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"}, - {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"}, - {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"}, - {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"}, - {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"}, - {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"}, - {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"}, - {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"}, - {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"}, - {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"}, - {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"}, - {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"}, - {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"}, - {"color": 
[255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"}, - {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"}, - {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"}, - {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"}, - {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"}, - {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"}, - {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"}, - {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"}, - {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"}, - {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"}, - {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"}, - {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"}, - {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"}, - {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"}, - {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"}, - {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"}, - {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"}, - {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"}, - {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"}, - {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"}, - {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"}, - {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"}, - {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"}, - {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"}, - {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"}, - {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"}, - {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"}, - {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"}, - {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"}, - {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"}, - {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"}, - {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"}, - {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"}, - {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"}, - {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"}, - {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"}, - {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"}, - {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"}, - {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"}, - {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"}, - {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"}, - {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"}, - {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"}, - {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"}, - {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"}, - {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"}, - {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"}, - {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"}, - {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"}, - {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": 
"book"}, - {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"}, - {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"}, - {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"}, - {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"}, - {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"}, - {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"}, - {"id": 92, "name": "banner", "supercategory": "textile"}, - {"id": 93, "name": "blanket", "supercategory": "textile"}, - {"id": 94, "name": "branch", "supercategory": "plant"}, - {"id": 95, "name": "bridge", "supercategory": "building"}, - {"id": 96, "name": "building-other", "supercategory": "building"}, - {"id": 97, "name": "bush", "supercategory": "plant"}, - {"id": 98, "name": "cabinet", "supercategory": "furniture-stuff"}, - {"id": 99, "name": "cage", "supercategory": "structural"}, - {"id": 100, "name": "cardboard", "supercategory": "raw-material"}, - {"id": 101, "name": "carpet", "supercategory": "floor"}, - {"id": 102, "name": "ceiling-other", "supercategory": "ceiling"}, - {"id": 103, "name": "ceiling-tile", "supercategory": "ceiling"}, - {"id": 104, "name": "cloth", "supercategory": "textile"}, - {"id": 105, "name": "clothes", "supercategory": "textile"}, - {"id": 106, "name": "clouds", "supercategory": "sky"}, - {"id": 107, "name": "counter", "supercategory": "furniture-stuff"}, - {"id": 108, "name": "cupboard", "supercategory": "furniture-stuff"}, - {"id": 109, "name": "curtain", "supercategory": "textile"}, - {"id": 110, "name": "desk-stuff", "supercategory": "furniture-stuff"}, - {"id": 111, "name": "dirt", "supercategory": "ground"}, - {"id": 112, "name": "door-stuff", "supercategory": "furniture-stuff"}, - {"id": 113, "name": "fence", "supercategory": "structural"}, - {"id": 114, "name": "floor-marble", "supercategory": "floor"}, - {"id": 115, "name": "floor-other", "supercategory": "floor"}, - {"id": 116, "name": "floor-stone", "supercategory": "floor"}, - {"id": 117, "name": "floor-tile", "supercategory": "floor"}, - {"id": 118, "name": "floor-wood", "supercategory": "floor"}, - {"id": 119, "name": "flower", "supercategory": "plant"}, - {"id": 120, "name": "fog", "supercategory": "water"}, - {"id": 121, "name": "food-other", "supercategory": "food-stuff"}, - {"id": 122, "name": "fruit", "supercategory": "food-stuff"}, - {"id": 123, "name": "furniture-other", "supercategory": "furniture-stuff"}, - {"id": 124, "name": "grass", "supercategory": "plant"}, - {"id": 125, "name": "gravel", "supercategory": "ground"}, - {"id": 126, "name": "ground-other", "supercategory": "ground"}, - {"id": 127, "name": "hill", "supercategory": "solid"}, - {"id": 128, "name": "house", "supercategory": "building"}, - {"id": 129, "name": "leaves", "supercategory": "plant"}, - {"id": 130, "name": "light", "supercategory": "furniture-stuff"}, - {"id": 131, "name": "mat", "supercategory": "textile"}, - {"id": 132, "name": "metal", "supercategory": "raw-material"}, - {"id": 133, "name": "mirror-stuff", "supercategory": "furniture-stuff"}, - {"id": 134, "name": "moss", "supercategory": "plant"}, - {"id": 135, "name": "mountain", "supercategory": "solid"}, - {"id": 136, "name": "mud", "supercategory": "ground"}, - {"id": 137, "name": "napkin", "supercategory": "textile"}, - {"id": 138, "name": "net", "supercategory": "structural"}, - {"id": 139, "name": "paper", "supercategory": "raw-material"}, - {"id": 140, "name": "pavement", "supercategory": "ground"}, - {"id": 
141, "name": "pillow", "supercategory": "textile"}, - {"id": 142, "name": "plant-other", "supercategory": "plant"}, - {"id": 143, "name": "plastic", "supercategory": "raw-material"}, - {"id": 144, "name": "platform", "supercategory": "ground"}, - {"id": 145, "name": "playingfield", "supercategory": "ground"}, - {"id": 146, "name": "railing", "supercategory": "structural"}, - {"id": 147, "name": "railroad", "supercategory": "ground"}, - {"id": 148, "name": "river", "supercategory": "water"}, - {"id": 149, "name": "road", "supercategory": "ground"}, - {"id": 150, "name": "rock", "supercategory": "solid"}, - {"id": 151, "name": "roof", "supercategory": "building"}, - {"id": 152, "name": "rug", "supercategory": "textile"}, - {"id": 153, "name": "salad", "supercategory": "food-stuff"}, - {"id": 154, "name": "sand", "supercategory": "ground"}, - {"id": 155, "name": "sea", "supercategory": "water"}, - {"id": 156, "name": "shelf", "supercategory": "furniture-stuff"}, - {"id": 157, "name": "sky-other", "supercategory": "sky"}, - {"id": 158, "name": "skyscraper", "supercategory": "building"}, - {"id": 159, "name": "snow", "supercategory": "ground"}, - {"id": 160, "name": "solid-other", "supercategory": "solid"}, - {"id": 161, "name": "stairs", "supercategory": "furniture-stuff"}, - {"id": 162, "name": "stone", "supercategory": "solid"}, - {"id": 163, "name": "straw", "supercategory": "plant"}, - {"id": 164, "name": "structural-other", "supercategory": "structural"}, - {"id": 165, "name": "table", "supercategory": "furniture-stuff"}, - {"id": 166, "name": "tent", "supercategory": "building"}, - {"id": 167, "name": "textile-other", "supercategory": "textile"}, - {"id": 168, "name": "towel", "supercategory": "textile"}, - {"id": 169, "name": "tree", "supercategory": "plant"}, - {"id": 170, "name": "vegetable", "supercategory": "food-stuff"}, - {"id": 171, "name": "wall-brick", "supercategory": "wall"}, - {"id": 172, "name": "wall-concrete", "supercategory": "wall"}, - {"id": 173, "name": "wall-other", "supercategory": "wall"}, - {"id": 174, "name": "wall-panel", "supercategory": "wall"}, - {"id": 175, "name": "wall-stone", "supercategory": "wall"}, - {"id": 176, "name": "wall-tile", "supercategory": "wall"}, - {"id": 177, "name": "wall-wood", "supercategory": "wall"}, - {"id": 178, "name": "water-other", "supercategory": "water"}, - {"id": 179, "name": "waterdrops", "supercategory": "water"}, - {"id": 180, "name": "window-blind", "supercategory": "window"}, - {"id": 181, "name": "window-other", "supercategory": "window"}, - {"id": 182, "name": "wood", "supercategory": "solid"}, -] - -def _get_coco_stuff_meta(cat_list): - # Id 0 is reserved for ignore_label, we change ignore_label for 0 - # to 255 in our pre-processing. 
- stuff_ids = [k["id"] for k in cat_list] - - # For semantic segmentation, this mapping maps from contiguous stuff id - # (in [0, 91], used in models) to ids in the dataset (used for processing results) - stuff_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(stuff_ids)} - stuff_classes = [k["name"] for k in cat_list] - - ret = { - "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id, - "stuff_classes": stuff_classes, - } - return ret - - -def register_all_coco_stuff_10k(root): - root = os.path.join(root, "coco", "coco_stuff_10k") - meta = _get_coco_stuff_meta(COCO_CATEGORIES) - for name, image_dirname, sem_seg_dirname in [ - ("train", "images_detectron2/train", "annotations_detectron2/train"), - ]: - image_dir = os.path.join(root, image_dirname) - gt_dir = os.path.join(root, sem_seg_dirname) - name = f"coco_2017_{name}_stuff_10k_sem_seg" - DatasetCatalog.register( - name, - lambda x=image_dir, y=gt_dir: load_sem_seg( - y, x, gt_ext="png", image_ext="jpg" - ), - ) - MetadataCatalog.get(name).set( - image_root=image_dir, - sem_seg_root=gt_dir, - evaluator_type="sem_seg", - ignore_label=255, - **meta, - ) - - -def register_all_coco_stuff(root): - root = os.path.join(root, "coco") - meta = _get_coco_stuff_meta(COCO_CATEGORIES) - - for name, image_dirname, sem_seg_dirname in [ - ("train", "train2017", "stuffthingmaps_detectron2/train2017"), - ]: - image_dir = os.path.join(root, image_dirname) - gt_dir = os.path.join(root, sem_seg_dirname) - all_name = f"coco_2017_{name}_stuff_sem_seg" - DatasetCatalog.register( - all_name, - lambda x=image_dir, y=gt_dir: load_sem_seg( - y, x, gt_ext="png", image_ext="jpg" - ), - ) - MetadataCatalog.get(all_name).set( - image_root=image_dir, - sem_seg_root=gt_dir, - evaluator_type="sem_seg", - ignore_label=255, - **meta, - ) - - -_root = os.getenv("DETECTRON2_DATASETS", "datasets") -register_all_coco_stuff_10k(_root) -register_all_coco_stuff(_root) diff --git a/spaces/mpatel57/WOUAF-Text-to-Image/torch_utils/__init__.py b/spaces/mpatel57/WOUAF-Text-to-Image/torch_utils/__init__.py deleted file mode 100644 index ece0ea08fe2e939cc260a1dafc0ab5b391b773d9..0000000000000000000000000000000000000000 --- a/spaces/mpatel57/WOUAF-Text-to-Image/torch_utils/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py b/spaces/mshukor/UnIVAL/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py deleted file mode 100644 index ea8fae98e87e9f3e69bc51987703a6429eb0c92a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py +++ /dev/null @@ -1,842 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from typing import Dict, List, Optional - -import math -import numpy as np - -import torch -import torch.nn.functional as F -from torch import Tensor - -from .noisy_channel_beam_search import NoisyChannelBeamSearch -from fairseq.sequence_generator import EnsembleModel - - -class NoisyChannelSequenceGenerator(object): - def __init__( - self, - combine_method, - tgt_dict, - src_dict=None, - beam_size=1, - max_len_a=0, - max_len_b=200, - min_len=1, - len_penalty=1.0, - unk_penalty=0.0, - retain_dropout=False, - temperature=1.0, - match_source_len=False, - no_repeat_ngram_size=0, - normalize_scores=True, - channel_models=None, - k2=10, - ch_weight=1.0, - channel_scoring_type='log_norm', - top_k_vocab=0, - lm_models=None, - lm_dict=None, - lm_weight=1.0, - normalize_lm_scores_by_tgt_len=False, - ): - """Generates translations of a given source sentence, - using beam search with noisy channel decoding. - - Args: - combine_method (string, optional): Method to combine direct, LM and - channel model scores (default: None) - tgt_dict (~fairseq.data.Dictionary): target dictionary - src_dict (~fairseq.data.Dictionary): source dictionary - beam_size (int, optional): beam width (default: 1) - max_len_a/b (int, optional): generate sequences of maximum length - ax + b, where x is the source length - min_len (int, optional): the minimum length of the generated output - (not including end-of-sentence) - len_penalty (float, optional): length penalty, where <1.0 favors - shorter, >1.0 favors longer sentences (default: 1.0) - unk_penalty (float, optional): unknown word penalty, where <0 - produces more unks, >0 produces fewer (default: 0.0) - retain_dropout (bool, optional): use dropout when generating - (default: False) - temperature (float, optional): temperature, where values - >1.0 produce more uniform samples and values <1.0 produce - sharper samples (default: 1.0) - match_source_len (bool, optional): outputs should match the source - length (default: False) - no_repeat_ngram_size (int, optional): Size of n-grams that we avoid - repeating in the generation (default: 0) - normalize_scores (bool, optional): normalize scores by the length - of the output (default: True) - channel_models (List[~fairseq.models.FairseqModel]): ensemble of models - translating from the target to the source - k2 (int, optional): Top K2 candidates to score per beam at each step (default:10) - ch_weight (int, optional): Weight associated with the channel model score - assuming that the direct model score has weight 1.0 (default: 1.0) - channel_scoring_type (str, optional): String specifying how to score - the channel model (default: 'log_norm') - top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or - `'src_vocab_batched'`, then this parameter specifies the number of - most frequent tokens to include in the channel model output vocabulary, - in addition to the source tokens in the input batch (default: 0) - lm_models (List[~fairseq.models.FairseqModel]): ensemble of models - generating text in the target language - lm_dict (~fairseq.data.Dictionary): LM Model dictionary - lm_weight (int, optional): Weight associated with the LM model score - assuming that the direct model score has weight 1.0 (default: 1.0) - normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores - by the target length? 
By default, we normalize the combination of - LM and channel model scores by the source length - """ - self.pad = tgt_dict.pad() - self.unk = tgt_dict.unk() - self.eos = tgt_dict.eos() - self.vocab_size = len(tgt_dict) - self.beam_size = beam_size - # the max beam size is the dictionary size - 1, since we never select pad - self.beam_size = min(beam_size, self.vocab_size - 1) - self.max_len_a = max_len_a - self.max_len_b = max_len_b - self.min_len = min_len - self.normalize_scores = normalize_scores - self.len_penalty = len_penalty - self.unk_penalty = unk_penalty - self.retain_dropout = retain_dropout - self.temperature = temperature - self.match_source_len = match_source_len - self.no_repeat_ngram_size = no_repeat_ngram_size - self.channel_models = channel_models - self.src_dict = src_dict - self.tgt_dict = tgt_dict - self.combine_method = combine_method - self.k2 = k2 - self.ch_weight = ch_weight - self.channel_scoring_type = channel_scoring_type - self.top_k_vocab = top_k_vocab - self.lm_models = lm_models - self.lm_dict = lm_dict - self.lm_weight = lm_weight - self.log_softmax_fn = torch.nn.LogSoftmax(dim=1) - self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len - - self.share_tgt_dict = (self.lm_dict == self.tgt_dict) - self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict) - - self.ch_scoring_bsz = 3072 - - assert temperature > 0, '--temperature must be greater than 0' - - self.search = NoisyChannelBeamSearch(tgt_dict) - - @torch.no_grad() - def generate( - self, - models, - sample, - prefix_tokens=None, - bos_token=None, - **kwargs - ): - """Generate a batch of translations. - Args: - models (List[~fairseq.models.FairseqModel]): ensemble of models - sample (dict): batch - prefix_tokens (torch.LongTensor, optional): force decoder to begin - with these tokens - """ - model = EnsembleModel(models) - incremental_states = torch.jit.annotate( - List[Dict[str, Dict[str, Optional[Tensor]]]], - [ - torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) - for i in range(model.models_size) - ], - ) - if not self.retain_dropout: - model.eval() - - # model.forward normally channels prev_output_tokens into the decoder - # separately, but SequenceGenerator directly calls model.encoder - encoder_input = { - k: v for k, v in sample['net_input'].items() - if k != 'prev_output_tokens' - } - src_tokens = encoder_input['src_tokens'] - src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) - input_size = src_tokens.size() - # batch dimension goes first followed by source lengths - bsz = input_size[0] - src_len = input_size[1] - beam_size = self.beam_size - - if self.match_source_len: - max_len = src_lengths_no_eos.max().item() - else: - max_len = min( - int(self.max_len_a * src_len + self.max_len_b), - # exclude the EOS marker - model.max_decoder_positions() - 1, - ) - - # compute the encoder output for each beam - encoder_outs = model.forward_encoder(encoder_input) - new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) - new_order = new_order.to(src_tokens.device).long() - encoder_outs = model.reorder_encoder_out(encoder_outs, new_order) - - src_lengths = encoder_input['src_lengths'] - # initialize buffers - scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0) - lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0) - - scores_buf = scores.clone() - tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad) - tokens_buf = tokens.clone() - tokens[:, 0] = self.eos if bos_token is 
None else bos_token - - # reorder source tokens so they may be used as a reference in generating P(S|T) - src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index) - - src_tokens = src_tokens.repeat(1, beam_size).view(-1, src_len) - src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1) - - attn, attn_buf = None, None - nonpad_idxs = None - - # The cands_to_ignore indicates candidates that should be ignored. - # For example, suppose we're sampling and have already finalized 2/5 - # samples. Then the cands_to_ignore would mark 2 positions as being ignored, - # so that we only finalize the remaining 3 samples. - cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask - - # list of completed sentences - finalized = [[] for i in range(bsz)] - finished = [False for i in range(bsz)] - num_remaining_sent = bsz - - # number of candidate hypos per step - cand_size = 2 * beam_size # 2 x beam size in case half are EOS - - # offset arrays for converting between different indexing schemes - bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) - cand_offsets = torch.arange(0, cand_size).type_as(tokens) - - # helper function for allocating buffers on the fly - buffers = {} - - def buffer(name, type_of=tokens): # noqa - if name not in buffers: - buffers[name] = type_of.new() - return buffers[name] - - def is_finished(sent, step, unfin_idx): - """ - Check whether we've finished generation for a given sentence, by - comparing the worst score among finalized hypotheses to the best - possible score among unfinalized hypotheses. - """ - assert len(finalized[sent]) <= beam_size - if len(finalized[sent]) == beam_size: - return True - return False - - def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores): - """ - Finalize the given hypotheses at this step, while keeping the total - number of finalized hypotheses per sentence <= beam_size. - - Note: the input must be in the desired finalization order, so that - hypotheses that appear earlier in the input are preferred to those - that appear later. 
- - Args: - step: current time step - bbsz_idx: A vector of indices in the range [0, bsz*beam_size), - indicating which hypotheses to finalize - eos_scores: A vector of the same size as bbsz_idx containing - fw scores for each hypothesis - combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing - combined noisy channel scores for each hypothesis - """ - assert bbsz_idx.numel() == eos_scores.numel() - - # clone relevant token and attention tensors - tokens_clone = tokens.index_select(0, bbsz_idx) - tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS - assert not tokens_clone.eq(self.eos).any() - tokens_clone[:, step] = self.eos - attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None - - # compute scores per token position - pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] - pos_scores[:, step] = eos_scores - # convert from cumulative to per-position scores - pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] - - # normalize sentence-level scores - if self.normalize_scores: - combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty - - cum_unfin = [] - prev = 0 - for f in finished: - if f: - prev += 1 - else: - cum_unfin.append(prev) - - sents_seen = set() - for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())): - unfin_idx = idx // beam_size - sent = unfin_idx + cum_unfin[unfin_idx] - - sents_seen.add((sent, unfin_idx)) - - if self.match_source_len and step > src_lengths_no_eos[unfin_idx]: - score = -math.inf - - def get_hypo(): - - if attn_clone is not None: - # remove padding tokens from attn scores - hypo_attn = attn_clone[i][nonpad_idxs[sent]] - _, alignment = hypo_attn.max(dim=0) - else: - hypo_attn = None - alignment = None - - return { - 'tokens': tokens_clone[i], - 'score': score, - 'attention': hypo_attn, # src_len x tgt_len - 'alignment': alignment, - 'positional_scores': pos_scores[i], - } - - if len(finalized[sent]) < beam_size: - finalized[sent].append(get_hypo()) - - newly_finished = [] - for sent, unfin_idx in sents_seen: - # check termination conditions for this sentence - if not finished[sent] and is_finished(sent, step, unfin_idx): - finished[sent] = True - newly_finished.append(unfin_idx) - return newly_finished - - def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k): - """Rescore the top k hypothesis from each beam using noisy channel modeling - Returns: - new_fw_lprobs: the direct model probabilities after pruning the top k - new_ch_lm_lprobs: the combined channel and language model probabilities - new_lm_lprobs: the language model probabilities after pruning the top k - """ - with torch.no_grad(): - lprobs_size = lprobs.size() - if prefix_tokens is not None and step < prefix_tokens.size(1): - probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] - cand_scores = torch.gather( - probs_slice, dim=1, - index=prefix_tokens[:, step].view(-1, 1).data - ).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1) - cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1) - - # need to calculate and save fw and lm probs for prefix tokens - fw_top_k = cand_scores - fw_top_k_idx = cand_indices - k = 1 - else: - # take the top k best words for every sentence in batch*beam - fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k) - eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0] - 
ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0) - src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype) - - if self.combine_method != "lm_only": - temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1) - not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index - cur_tgt_size = step+2 - - # add eos to all candidate sentences except those that already end in eos - eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1) - eos_tokens[eos_idx] = self.tgt_dict.pad_index - - if step == 0: - channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1) - else: - # move eos from beginning to end of target sentence - channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1) - - ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size)) - ch_input_lengths[eos_idx] = cur_tgt_size-1 - if self.channel_scoring_type == "unnormalized": - ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) - ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) - del ch_encoder_output - ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:]) - ch_intermed_scores = ch_intermed_scores.float() - ch_intermed_scores *= not_padding.float() - ch_scores = torch.sum(ch_intermed_scores, dim=1) - elif self.channel_scoring_type == "k2_separate": - for k_idx in range(k): - k_eos_tokens = eos_tokens[k_idx::k, :] - if step == 0: - k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) - else: - # move eos from beginning to end of target sentence - k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) - k_ch_input_lengths = ch_input_lengths[k_idx::k] - k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens) - k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True) - k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2) - k_ch_intermed_scores *= not_padding.float() - ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1) - elif self.channel_scoring_type == "src_vocab": - ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) - ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) - - del ch_encoder_output - ch_lprobs = normalized_scores_with_batch_vocab( - channel_model.decoder, - ch_decoder_output, src_tokens, k, bsz, beam_size, - self.src_dict.pad_index, top_k=self.top_k_vocab) - ch_scores = torch.sum(ch_lprobs, dim=1) - elif self.channel_scoring_type == "src_vocab_batched": - ch_bsz_size = temp_src_tokens_full.shape[0] - ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz)) - for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)): - end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size) - temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :] - channel_input_batch = channel_input[start_idx:end_idx, :] - ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx] - ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch) - ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, 
encoder_out=ch_encoder_output_batch, features_only=True) - ch_lprobs_list[i] = normalized_scores_with_batch_vocab( - channel_model.decoder, - ch_decoder_output_batch, src_tokens, k, bsz, beam_size, - self.src_dict.pad_index, top_k=self.top_k_vocab, - start_idx=start_idx, end_idx=end_idx) - ch_lprobs = torch.cat(ch_lprobs_list, dim=0) - ch_scores = torch.sum(ch_lprobs, dim=1) - else: - ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full) - ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True) - ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1) - ch_intermed_scores *= not_padding.float() - ch_scores = torch.sum(ch_intermed_scores, dim=1) - - else: - cur_tgt_size = 0 - ch_scores = ch_scores.view(bsz*beam_size, k) - expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten() - - if self.share_tgt_dict: - lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k) - else: - new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm) - new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm) - lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k) - - lm_scores.add_(expanded_lm_prefix_scores) - ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size) - # initialize all as min value - new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) - new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) - new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) - new_fw_lprobs[:, self.pad] = -math.inf - new_ch_lm_lprobs[:, self.pad] = -math.inf - new_lm_lprobs[:, self.pad] = -math.inf - - new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k) - new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores) - new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k)) - return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs - - def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size): - if self.channel_scoring_type == "unnormalized": - ch_scores = self.log_softmax_fn( - ch_scores.view(-1, self.beam_size * self.k2) - ).view(ch_scores.shape) - ch_scores = ch_scores * self.ch_weight - lm_scores1 = lm_scores1 * self.lm_weight - - if combine_type == "lm_only": - # log P(T|S) + log P(T) - ch_scores = lm_scores1.view(ch_scores.size()) - elif combine_type == "noisy_channel": - # 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T) - if self.normalize_lm_scores_by_tgt_len: - ch_scores.div_(src_size) - lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size) - ch_scores.add_(lm_scores_norm) - # 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T) - else: - ch_scores.add_(lm_scores1.view(ch_scores.size())) - ch_scores.div_(src_size) - - return ch_scores - - if self.channel_models is not None: - channel_model = self.channel_models[0] # assume only one channel_model model - else: - channel_model = None - - lm = EnsembleModel(self.lm_models) - lm_incremental_states = torch.jit.annotate( - List[Dict[str, Dict[str, Optional[Tensor]]]], - [ - torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) - for i in range(lm.models_size) - ], - ) - - reorder_state = None - batch_idxs = None - for step in range(max_len + 1): # one extra step for EOS 
marker - # reorder decoder internal states based on the prev choice of beams - if reorder_state is not None: - if batch_idxs is not None: - # update beam indices to take into account removed sentences - corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) - reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) - model.reorder_incremental_state(incremental_states, reorder_state) - encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state) - - lm.reorder_incremental_state(lm_incremental_states, reorder_state) - - fw_lprobs, avg_attn_scores = model.forward_decoder( - tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature, - ) - - fw_lprobs[:, self.pad] = -math.inf # never select pad - fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty - fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2) - - # handle min and max length constraints - if step >= max_len: - fw_lprobs[:, :self.eos] = -math.inf - fw_lprobs[:, self.eos + 1:] = -math.inf - elif step < self.min_len: - fw_lprobs[:, self.eos] = -math.inf - - # handle prefix tokens (possibly with different lengths) - if prefix_tokens is not None and step < prefix_tokens.size(1): - prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) - prefix_mask = prefix_toks.ne(self.pad) - - prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) - fw_lprobs[prefix_mask] = -math.inf - fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_( - -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs - ) - - prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) - ch_lm_lprobs[prefix_mask] = -math.inf - ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_( - -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs - ) - - prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) - lm_lprobs[prefix_mask] = -math.inf - lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_( - -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs - ) - - # if prefix includes eos, then we should make sure tokens and - # scores are the same across all beams - eos_mask = prefix_toks.eq(self.eos) - if eos_mask.any(): - # validate that the first beam matches the prefix - first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1] - eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] - target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] - assert (first_beam == target_prefix).all() - - def replicate_first_beam(tensor, mask): - tensor = tensor.view(-1, beam_size, tensor.size(-1)) - tensor[mask] = tensor[mask][:, :1, :] - return tensor.view(-1, tensor.size(-1)) - - # copy tokens, scores and lprobs from the first beam to all beams - tokens = replicate_first_beam(tokens, eos_mask_batch_dim) - scores = replicate_first_beam(scores, eos_mask_batch_dim) - - fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim) - ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim) - lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim) - - if self.no_repeat_ngram_size > 0: - # for each beam and batch sentence, generate a list of previous ngrams - gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)] - for bbsz_idx in range(bsz * beam_size): - gen_tokens = tokens[bbsz_idx].tolist() - for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]): - 
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \ - gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]] - - # Record attention scores - if avg_attn_scores is not None: - if attn is None: - attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2) - attn_buf = attn.clone() - nonpad_idxs = src_tokens.ne(self.pad) - attn[:, :, step + 1].copy_(avg_attn_scores) - - scores = scores.type_as(fw_lprobs) - scores_buf = scores_buf.type_as(fw_lprobs) - - self.search.set_src_lengths(src_lengths_no_eos) - - if self.no_repeat_ngram_size > 0: - def calculate_banned_tokens(bbsz_idx): - # before decoding the next token, prevent decoding of ngrams that have already appeared - ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist()) - return gen_ngrams[bbsz_idx].get(ngram_index, []) - - if step + 2 - self.no_repeat_ngram_size >= 0: - # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet - banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)] - else: - banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)] - - for bbsz_idx in range(bsz * beam_size): - fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf - - combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step( - step, - fw_lprobs.view(bsz, -1, self.vocab_size), - scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size), - lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method - ) - - # cand_bbsz_idx contains beam indices for the top candidate - # hypotheses, with a range of values: [0, bsz*beam_size), - # and dimensions: [bsz, cand_size] - cand_bbsz_idx = cand_beams.add(bbsz_offsets) - - # finalize hypotheses that end in eos (except for candidates to be ignored) - eos_mask = cand_indices.eq(self.eos) - eos_mask[:, :beam_size] &= ~cands_to_ignore - - # only consider eos when it's among the top beam_size indices - eos_bbsz_idx = torch.masked_select( - cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] - ) - - finalized_sents = set() - if eos_bbsz_idx.numel() > 0: - eos_scores = torch.masked_select( - fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size] - ) - combined_noisy_channel_eos_scores = torch.masked_select( - combined_noisy_channel_scores[:, :beam_size], - mask=eos_mask[:, :beam_size], - ) - - # finalize hypo using channel model score - finalized_sents = finalize_hypos( - step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores) - - num_remaining_sent -= len(finalized_sents) - - assert num_remaining_sent >= 0 - if num_remaining_sent == 0: - break - - if len(finalized_sents) > 0: - new_bsz = bsz - len(finalized_sents) - - # construct batch_idxs which holds indices of batches to keep for the next pass - batch_mask = cand_indices.new_ones(bsz) - batch_mask[cand_indices.new(finalized_sents)] = 0 - batch_idxs = torch.nonzero(batch_mask).squeeze(-1) - - eos_mask = eos_mask[batch_idxs] - cand_beams = cand_beams[batch_idxs] - bbsz_offsets.resize_(new_bsz, 1) - cand_bbsz_idx = cand_beams.add(bbsz_offsets) - - lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs] - - fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs] - cand_indices = cand_indices[batch_idxs] - if prefix_tokens is not None: - prefix_tokens = prefix_tokens[batch_idxs] - src_lengths_no_eos = src_lengths_no_eos[batch_idxs] - cands_to_ignore = cands_to_ignore[batch_idxs] - - scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - scores_buf.resize_as_(scores) - 
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - tokens_buf.resize_as_(tokens) - src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze() - - if attn is not None: - attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) - attn_buf.resize_as_(attn) - bsz = new_bsz - else: - batch_idxs = None - - # Set active_mask so that values > cand_size indicate eos or - # ignored hypos and values < cand_size indicate candidate - # active hypos. After this, the min values per row are the top - # candidate active hypos. - eos_mask[:, :beam_size] |= cands_to_ignore - active_mask = torch.add( - eos_mask.type_as(cand_offsets) * cand_size, - cand_offsets[: eos_mask.size(1)], - ) - - # get the top beam_size active hypotheses, which are just the hypos - # with the smallest values in active_mask - active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore') - torch.topk( - active_mask, k=beam_size, dim=1, largest=False, - out=(new_cands_to_ignore, active_hypos) - ) - - # update cands_to_ignore to ignore any finalized hypos - cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] - assert (~cands_to_ignore).any(dim=1).all() - - active_bbsz_idx = buffer('active_bbsz_idx') - torch.gather( - cand_bbsz_idx, dim=1, index=active_hypos, - out=active_bbsz_idx, - ) - active_scores = torch.gather( - fw_lprobs_top_k, dim=1, index=active_hypos, - out=scores[:, step].view(bsz, beam_size), - ) - - active_bbsz_idx = active_bbsz_idx.view(-1) - active_scores = active_scores.view(-1) - - # copy tokens and scores for active hypotheses - torch.index_select( - tokens[:, :step + 1], dim=0, index=active_bbsz_idx, - out=tokens_buf[:, :step + 1], - ) - torch.gather( - cand_indices, dim=1, index=active_hypos, - out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], - ) - if step > 0: - torch.index_select( - scores[:, :step], dim=0, index=active_bbsz_idx, - out=scores_buf[:, :step], - ) - torch.gather( - fw_lprobs_top_k, dim=1, index=active_hypos, - out=scores_buf.view(bsz, beam_size, -1)[:, :, step], - ) - torch.gather( - lm_lprobs_top_k, dim=1, index=active_hypos, - out=lm_prefix_scores.view(bsz, beam_size) - ) - - # copy attention for active hypotheses - if attn is not None: - torch.index_select( - attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, - out=attn_buf[:, :, :step + 2], - ) - - # swap buffers - tokens, tokens_buf = tokens_buf, tokens - scores, scores_buf = scores_buf, scores - if attn is not None: - attn, attn_buf = attn_buf, attn - - # reorder incremental state in decoder - reorder_state = active_bbsz_idx - - # sort by score descending - for sent in range(len(finalized)): - finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) - - return finalized - - -def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k): - with torch.no_grad(): - lm_lprobs, avg_attn_scores = model.forward_decoder( - input_tokens, encoder_outs=None, incremental_states=incremental_states, - ) - - lm_lprobs_size = lm_lprobs.size(0) - probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1) - - return probs_next_wrd - - -def make_dict2dict(old_dict, new_dict): - dict2dict_map = {} - for sym in old_dict.symbols: - 
dict2dict_map[old_dict.index(sym)] = new_dict.index(sym) - return dict2dict_map - - -def dict2dict(tokens, dict2dict_map): - if tokens.device == torch.device('cpu'): - tokens_tmp = tokens - else: - tokens_tmp = tokens.cpu() - return tokens_tmp.map_( - tokens_tmp, - lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)] - ).to(tokens.device) - - -def reorder_tokens(tokens, lengths, eos): - # reorder source tokens so they may be used as reference for P(S|T) - return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0) - - -def reorder_all_tokens(tokens, lengths, eos): - # used to reorder src tokens from [ .. ] to [ ...] - # so source tokens can be used to predict P(S|T) - return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)]) - - -def normalized_scores_with_batch_vocab( - model_decoder, features, target_ids, k, bsz, beam_size, - pad_idx, top_k=0, vocab_size_meter=None, start_idx=None, - end_idx=None, **kwargs): - """ - Get normalized probabilities (or log probs) from a net's output - w.r.t. vocab consisting of target IDs in the batch - """ - if model_decoder.adaptive_softmax is None: - weight = model_decoder.output_projection.weight - vocab_ids = torch.unique( - torch.cat( - (torch.unique(target_ids), torch.arange(top_k, device=target_ids.device)) - ) - ) - id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids)))) - mapped_target_ids = target_ids.cpu().apply_( - lambda x, id_map=id_map: id_map[x] - ).to(target_ids.device) - expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1) - if start_idx is not None and end_idx is not None: - expanded_target_ids = expanded_target_ids[start_idx:end_idx, :] - logits = F.linear(features, weight[vocab_ids, :]) - log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32) - intermed_scores = torch.gather( - log_softmax[:, :-1, :], - 2, - expanded_target_ids[:, 1:].unsqueeze(2), - ).squeeze() - not_padding = expanded_target_ids[:, 1:] != pad_idx - intermed_scores *= not_padding.float() - return intermed_scores - else: - raise ValueError("adaptive softmax doesn't work with " + - "`normalized_scores_with_batch_vocab()`") diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh deleted file mode 100644 index f75afafb1c4ad04ee71ab8541064ab0477430616..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/steps_gan/train_sat.sh +++ /dev/null @@ -1,281 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2012 Johns Hopkins University (Author: Daniel Povey). Apache 2.0. - - -# This does Speaker Adapted Training (SAT), i.e. train on -# fMLLR-adapted features. It can be done on top of either LDA+MLLT, or -# delta and delta-delta features. If there are no transforms supplied -# in the alignment directory, it will estimate transforms itself before -# building the tree (and in any case, it estimates transforms a number -# of times during training). - - -# Begin configuration section. -stage=-5 -exit_stage=-100 # you can use this to require it to exit at the - # beginning of a specific stage. Not all values are - # supported. 
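Stepping back to the noisy-channel sequence generator whose diff ends above: the comments in `combine_ch_lm` spell out the scoring rule, roughly 1/t log P(T|S) + 1/s log P(S|T) plus log P(T) normalized by either the target or the source length, with the channel and LM terms scaled by `ch_weight` and `lm_weight`. A standalone sketch of that rule as stated in those comments (an illustration only, not the generator's actual code path, which also handles k2 pruning, batching, and incremental decoder state):

```python
# Illustration of the "noisy_channel" combination described in the comments of
# combine_ch_lm() in the generator above; a sketch, not the original code path.
import torch


def combined_noisy_channel_score(
    lp_direct: torch.Tensor,    # summed log P(T|S) from the direct model
    lp_channel: torch.Tensor,   # summed log P(S|T) from the channel model
    lp_lm: torch.Tensor,        # summed log P(T) from the language model
    src_len: torch.Tensor,      # source length s
    tgt_len: torch.Tensor,      # target length t
    ch_weight: float = 1.0,
    lm_weight: float = 1.0,
    normalize_lm_scores_by_tgt_len: bool = False,
) -> torch.Tensor:
    direct = lp_direct / tgt_len
    channel = ch_weight * lp_channel / src_len
    if normalize_lm_scores_by_tgt_len:
        lm = lm_weight * lp_lm / tgt_len   # 1/t logP(T|S) + 1/s logP(S|T) + 1/t logP(T)
    else:
        lm = lm_weight * lp_lm / src_len   # 1/t logP(T|S) + 1/s logP(S|T) + 1/s logP(T)
    return direct + channel + lm


# Example: two hypotheses scored against a 7-token source and a 5-token target.
scores = combined_noisy_channel_score(
    lp_direct=torch.tensor([-6.2, -5.9]),
    lp_channel=torch.tensor([-8.1, -9.4]),
    lp_lm=torch.tensor([-7.0, -6.5]),
    src_len=torch.tensor(7.0),
    tgt_len=torch.tensor(5.0),
)
print(scores)  # higher (less negative) is better
```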
-fmllr_update_type=full -cmd=run.pl -scale_opts="--transition-scale=1.0 --acoustic-scale=0.1 --self-loop-scale=0.1" -beam=10 -retry_beam=40 -careful=false -boost_silence=1.0 # Factor by which to boost silence likelihoods in alignment -context_opts= # e.g. set this to "--context-width 5 --central-position 2" for quinphone. -realign_iters="10 20 30"; -fmllr_iters="2 4 6 12"; -silence_weight=0.0 # Weight on silence in fMLLR estimation. -num_iters=35 # Number of iterations of training -max_iter_inc=25 # Last iter to increase #Gauss on. -power=0.2 # Exponent for number of gaussians according to occurrence counts -cluster_thresh=-1 # for build-tree control final bottom-up clustering of leaves -phone_map= -train_tree=true -tree_stats_opts= -cluster_phones_opts= -compile_questions_opts= -# End configuration section. -num_nonsil_states=3 - -echo "$0 $@" # Print the command line for logging - -[ -f path.sh ] && . ./path.sh -. parse_options.sh || exit 1; - -if [ $# != 6 ]; then - echo "Usage: steps/train_sat.sh <#leaves> <#gauss> " - echo " e.g.: steps/train_sat.sh 2500 15000 data/train_si84 data/lang exp/tri2b_ali_si84 exp/tri3b" - echo "Main options (for others, see top of script file)" - echo " --cmd (utils/run.pl|utils/queue.pl ) # how to run jobs." - echo " --config # config containing options" - echo " --stage # stage to do partial re-run from." - exit 1; -fi - -numleaves=$1 -totgauss=$2 -data=$3 -lang=$4 -alidir=$5 -dir=$6 - -for f in $data/feats.scp $lang/phones.txt $alidir/final.mdl $alidir/ali.1.gz; do - [ ! -f $f ] && echo "train_sat.sh: no such file $f" && exit 1; -done - -numgauss=$numleaves -incgauss=$[($totgauss-$numgauss)/$max_iter_inc] # per-iter #gauss increment -oov=`cat $lang/oov.int` -nj=`cat $alidir/num_jobs` || exit 1; -silphonelist=`cat $lang/phones/silence.csl` -ciphonelist=`cat $lang/phones/context_indep.csl` || exit 1; -sdata=$data/split$nj; -splice_opts=`cat $alidir/splice_opts 2>/dev/null` # frame-splicing options. -cmvn_opts=`cat $alidir/cmvn_opts 2>/dev/null` -delta_opts=`cat $alidir/delta_opts 2>/dev/null` -phone_map_opt= -[ ! -z "$phone_map" ] && phone_map_opt="--phone-map='$phone_map'" - -mkdir -p $dir/log -cp $alidir/splice_opts $dir 2>/dev/null # frame-splicing options. -cp $alidir/cmvn_opts $dir 2>/dev/null # cmn/cmvn option. -cp $alidir/delta_opts $dir 2>/dev/null # delta option. - -utils/lang/check_phones_compatible.sh $lang/phones.txt $alidir/phones.txt || exit 1; -cp $lang/phones.txt $dir || exit 1; - -echo $nj >$dir/num_jobs -[[ -d $sdata && $data/feats.scp -ot $sdata ]] || split_data.sh $data $nj || exit 1; - -# Set up features. - -if [ -f $alidir/final.mat ]; then feat_type=lda; else feat_type=delta; fi -echo "$0: feature type is $feat_type" - -## Set up speaker-independent features. 
-case $feat_type in - delta) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | add-deltas $delta_opts ark:- ark:- |";; - lda) sifeats="ark,s,cs:apply-cmvn $cmvn_opts --utt2spk=ark:$sdata/JOB/utt2spk scp:$sdata/JOB/cmvn.scp scp:$sdata/JOB/feats.scp ark:- | splice-feats $splice_opts ark:- ark:- | transform-feats $alidir/final.mat ark:- ark:- |" - cp $alidir/final.mat $dir - cp $alidir/full.mat $dir 2>/dev/null - ;; - *) echo "$0: invalid feature type $feat_type" && exit 1; -esac - -## Get initial fMLLR transforms (possibly from alignment dir) -if [ -f $alidir/trans.1 ]; then - echo "$0: Using transforms from $alidir" - feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$alidir/trans.JOB ark:- ark:- |" - cur_trans_dir=$alidir -else - if [ $stage -le -5 ]; then - echo "$0: obtaining initial fMLLR transforms since not present in $alidir" - # The next line is necessary because of $silphonelist otherwise being incorrect; would require - # old $lang dir which would require another option. Not needed anyway. - [ ! -z "$phone_map" ] && \ - echo "$0: error: you must provide transforms if you use the --phone-map option." && exit 1; - $cmd JOB=1:$nj $dir/log/fmllr.0.JOB.log \ - ali-to-post "ark:gunzip -c $alidir/ali.JOB.gz|" ark:- \| \ - weight-silence-post $silence_weight $silphonelist $alidir/final.mdl ark:- ark:- \| \ - gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ - --spk2utt=ark:$sdata/JOB/spk2utt $alidir/final.mdl "$sifeats" \ - ark:- ark:$dir/trans.JOB || exit 1; - fi - feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark,s,cs:$dir/trans.JOB ark:- ark:- |" - cur_trans_dir=$dir -fi - -if [ $stage -le -4 ] && $train_tree; then - # Get tree stats. - echo "$0: Accumulating tree stats" - $cmd JOB=1:$nj $dir/log/acc_tree.JOB.log \ - acc-tree-stats $context_opts $tree_stats_opts $phone_map_opt --ci-phones=$ciphonelist $alidir/final.mdl "$feats" \ - "ark:gunzip -c $alidir/ali.JOB.gz|" $dir/JOB.treeacc || exit 1; - [ "`ls $dir/*.treeacc | wc -w`" -ne "$nj" ] && echo "$0: Wrong #tree-accs" && exit 1; - $cmd $dir/log/sum_tree_acc.log \ - sum-tree-stats $dir/treeacc $dir/*.treeacc || exit 1; - rm $dir/*.treeacc -fi - -if [ $stage -le -3 ] && $train_tree; then - echo "$0: Getting questions for tree clustering." - # preparing questions, roots file... 
- cluster-phones --pdf-class-list=$(($num_nonsil_states / 2)) \ - $cluster_phones_opts $context_opts \ - $dir/treeacc $lang/phones/sets.int $dir/questions.int 2>$dir/log/questions.log || exit 1; - cat $lang/phones/extra_questions.int >> $dir/questions.int - compile-questions $context_opts $compile_questions_opts $lang/topo $dir/questions.int $dir/questions.qst 2>$dir/log/compile_questions.log || exit 1; - - echo "$0: Building the tree" - $cmd $dir/log/build_tree.log \ - build-tree $context_opts --verbose=1 --max-leaves=$numleaves \ - --cluster-thresh=$cluster_thresh $dir/treeacc $lang/phones/roots.int \ - $dir/questions.qst $lang/topo $dir/tree || exit 1; -fi - -if [ $stage -le -2 ]; then - echo "$0: Initializing the model" - if $train_tree; then - gmm-init-model --write-occs=$dir/1.occs \ - $dir/tree $dir/treeacc $lang/topo $dir/1.mdl 2> $dir/log/init_model.log || exit 1; - grep 'no stats' $dir/log/init_model.log && echo "This is a bad warning."; - rm $dir/treeacc - else - cp $alidir/tree $dir/ || exit 1; - $cmd JOB=1 $dir/log/init_model.log \ - gmm-init-model-flat $dir/tree $lang/topo $dir/1.mdl \ - "$feats subset-feats ark:- ark:-|" || exit 1; - fi -fi - -if [ $stage -le -1 ]; then - # Convert the alignments. - echo "$0: Converting alignments from $alidir to use current tree" - $cmd JOB=1:$nj $dir/log/convert.JOB.log \ - convert-ali $phone_map_opt $alidir/final.mdl $dir/1.mdl $dir/tree \ - "ark:gunzip -c $alidir/ali.JOB.gz|" "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; -fi - -[ "$exit_stage" -eq 0 ] && echo "$0: Exiting early: --exit-stage $exit_stage" && exit 0; - -if [ $stage -le 0 ] && [ "$realign_iters" != "" ]; then - echo "$0: Compiling graphs of transcripts" - $cmd JOB=1:$nj $dir/log/compile_graphs.JOB.log \ - compile-train-graphs --read-disambig-syms=$lang/phones/disambig.int $dir/tree $dir/1.mdl $lang/L.fst \ - "ark:utils/sym2int.pl --map-oov $oov -f 2- $lang/words.txt < $sdata/JOB/text |" \ - "ark:|gzip -c >$dir/fsts.JOB.gz" || exit 1; -fi - -x=1 -while [ $x -lt $num_iters ]; do - echo Pass $x - if echo $realign_iters | grep -w $x >/dev/null && [ $stage -le $x ]; then - echo Aligning data - mdl="gmm-boost-silence --boost=$boost_silence `cat $lang/phones/optional_silence.csl` $dir/$x.mdl - |" - $cmd JOB=1:$nj $dir/log/align.$x.JOB.log \ - gmm-align-compiled $scale_opts --beam=$beam --retry-beam=$retry_beam --careful=$careful "$mdl" \ - "ark:gunzip -c $dir/fsts.JOB.gz|" "$feats" \ - "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1; - fi - - if echo $fmllr_iters | grep -w $x >/dev/null; then - if [ $stage -le $x ]; then - echo Estimating fMLLR transforms - # We estimate a transform that's additional to the previous transform; - # we'll compose them. - $cmd JOB=1:$nj $dir/log/fmllr.$x.JOB.log \ - ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ - weight-silence-post $silence_weight $silphonelist $dir/$x.mdl ark:- ark:- \| \ - gmm-est-fmllr --fmllr-update-type=$fmllr_update_type \ - --spk2utt=ark:$sdata/JOB/spk2utt $dir/$x.mdl \ - "$feats" ark:- ark:$dir/tmp_trans.JOB || exit 1; - for n in `seq $nj`; do - ! 
( compose-transforms --b-is-affine=true \ - ark:$dir/tmp_trans.$n ark:$cur_trans_dir/trans.$n ark:$dir/composed_trans.$n \ - && mv $dir/composed_trans.$n $dir/trans.$n && \ - rm $dir/tmp_trans.$n ) 2>$dir/log/compose_transforms.$x.log \ - && echo "$0: Error composing transforms" && exit 1; - done - fi - feats="$sifeats transform-feats --utt2spk=ark:$sdata/JOB/utt2spk ark:$dir/trans.JOB ark:- ark:- |" - cur_trans_dir=$dir - fi - - if [ $stage -le $x ]; then - $cmd JOB=1:$nj $dir/log/acc.$x.JOB.log \ - gmm-acc-stats-ali $dir/$x.mdl "$feats" \ - "ark,s,cs:gunzip -c $dir/ali.JOB.gz|" $dir/$x.JOB.acc || exit 1; - [ `ls $dir/$x.*.acc | wc -w` -ne "$nj" ] && echo "$0: Wrong #accs" && exit 1; - $cmd $dir/log/update.$x.log \ - gmm-est --power=$power --write-occs=$dir/$[$x+1].occs --mix-up=$numgauss $dir/$x.mdl \ - "gmm-sum-accs - $dir/$x.*.acc |" $dir/$[$x+1].mdl || exit 1; - rm $dir/$x.mdl $dir/$x.*.acc - rm $dir/$x.occs - fi - [ $x -le $max_iter_inc ] && numgauss=$[$numgauss+$incgauss]; - x=$[$x+1]; -done - - -if [ $stage -le $x ]; then - # Accumulate stats for "alignment model"-- this model is - # computed with the speaker-independent features, but matches Gaussian-for-Gaussian - # with the final speaker-adapted model. - $cmd JOB=1:$nj $dir/log/acc_alimdl.JOB.log \ - ali-to-post "ark:gunzip -c $dir/ali.JOB.gz|" ark:- \| \ - gmm-acc-stats-twofeats $dir/$x.mdl "$feats" "$sifeats" \ - ark,s,cs:- $dir/$x.JOB.acc || exit 1; - [ `ls $dir/$x.*.acc | wc -w` -ne "$nj" ] && echo "$0: Wrong #accs" && exit 1; - # Update model. - $cmd $dir/log/est_alimdl.log \ - gmm-est --power=$power --remove-low-count-gaussians=false $dir/$x.mdl \ - "gmm-sum-accs - $dir/$x.*.acc|" $dir/$x.alimdl || exit 1; - rm $dir/$x.*.acc -fi - -rm $dir/final.{mdl,alimdl,occs} 2>/dev/null -ln -s $x.mdl $dir/final.mdl -ln -s $x.occs $dir/final.occs -ln -s $x.alimdl $dir/final.alimdl - - -steps/diagnostic/analyze_alignments.sh --cmd "$cmd" $lang $dir - -utils/summarize_warnings.pl $dir/log -( - echo "$0: Likelihood evolution:" - for x in `seq $[$num_iters-1]`; do - tail -n 30 $dir/log/acc.$x.*.log | awk '/Overall avg like/{l += $(NF-3)*$(NF-1); t += $(NF-1); } - /Overall average logdet/{d += $(NF-3)*$(NF-1); t2 += $(NF-1);} - END{ d /= t2; l /= t; printf("%s ", d+l); } ' - done - echo -) | tee $dir/log/summary.log - - -steps/info/gmm_dir_info.pl $dir - -echo "$0: done training SAT system in $dir" - -exit 0 diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh deleted file mode 100644 index 50bd5ac66c190a4425f85f683960f879174613e1..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name=ofa_ratarefcocoplus_branchground -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --gpus=8 -#SBATCH --threads-per-core=2 -#SBATCH --gpu-bind=closest -#SBATCH -C MI250 -#SBATCH -A gda2204 -#SBATCH --time=24:00:00 -#SBATCH --mail-type=END,FAIL -#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_ratarefcocoplus_branchground.out -#SBATCH --exclusive -#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr - - -cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts -source /lus/home/NAT/gda2204/mshukor/.bashrc - -conda activate main - - -rm core-python3* - - -srun -l -N 1 -n 1 -c 128 --gpus=8 bash 
averaging/branching/refcoco/ofa_ratarefcocoplus_branchground.sh - - diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/caption/ofa_caption_stage_1_soup_lr2e5dropout02.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/caption/ofa_caption_stage_1_soup_lr2e5dropout02.sh deleted file mode 100644 index 73c94d4a90b551c59c7665dbced972376123b01c..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/caption/ofa_caption_stage_1_soup_lr2e5dropout02.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -#SBATCH --job-name=ofa_caption_stage_1_soup_lr2e5dropout02 -#SBATCH --nodes=1 -#SBATCH --ntasks=1 -#SBATCH --gpus=8 -#SBATCH --threads-per-core=2 -#SBATCH --gpu-bind=closest -####SBATCH --nodelist=x1004c4s2b0n0 -#SBATCH --time=24:00:00 -#SBATCH -C MI250 -#SBATCH -A gda2204 -#SBATCH --mail-type=END,FAIL -#SBATCH --output=/lus/home/NAT/gda2204/mshukor/logs/slurm/ofa_caption_stage_1_soup_lr2e5dropout02.out -#SBATCH --exclusive -#SBATCH --mail-user=mustafa.shukor@isir.upmc.fr - - -cd /lus/home/NAT/gda2204/mshukor/code/ofa_ours/run_scripts -source /lus/home/NAT/gda2204/mshukor/.bashrc - -conda activate main - - -rm core-python3* - - -srun -l -N 1 -n 1 -c 128 --gpus=8 bash averaging/caption/ofa_caption_stage_1_soup_lr2e5dropout02.sh - - diff --git a/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/t.sh b/spaces/mshukor/UnIVAL/slurm_adastra/averaging/fusing/t.sh deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/multimodalart/mariogpt/mario_gpt/prompter.py b/spaces/multimodalart/mariogpt/mario_gpt/prompter.py deleted file mode 100644 index b3d566540fecf238f12c5a488176d16a5f1f9151..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/mariogpt/mario_gpt/prompter.py +++ /dev/null @@ -1,175 +0,0 @@ -from __future__ import annotations - -import random -from typing import Any, Dict, List, Optional, Tuple, Union - -import numpy as np -import torch -from scipy import stats -from transformers import pipeline - -from mario_gpt.dataset import MarioDataset -from mario_gpt.utils import view_level - -STATISTICS = { - "enemy": np.array([1.0, 3.0, 7.0]), - "pipe": np.array([0.0, 2.0, 5.0]), - "block": np.array([50.0, 75.0, 176.0]), -} - -FEATURE_EXTRACTION_MODEL = "facebook/bart-base" - - -class Prompter: - def __init__( - self, - level_tokenizer, - prompter_model: str = FEATURE_EXTRACTION_MODEL, - use_raw_counts: bool = False, - statistics: Optional[Dict[str, Any]] = None, - ): - self.prompter_model = prompter_model - self.feature_extraction = pipeline( - "feature-extraction", - model=prompter_model, - tokenizer=prompter_model, - framework="pt", - ) - - self.level_tokenizer = level_tokenizer - - self.use_raw_counts = use_raw_counts - self.statistics = statistics - if statistics is None: - self.statistics = STATISTICS - - @property - def pipe_thresholds(self) -> Tuple[List[int], List[str]]: - thresholds = self.statistics["pipe"] - keywords = ["no", "little", "some", "many"] - return thresholds, keywords - - @property - def enemy_thresholds(self) -> Tuple[List[int], List[str]]: - thresholds = self.statistics["enemy"] - keywords = ["no", "little", "some", "many"] - return thresholds, keywords - - @property - def block_thresholds(self) -> Tuple[List[int], List[str]]: - thresholds = self.statistics["block"] - keywords = ["little", "little", "some", "many"] - return thresholds, keywords - - def count_pipes(self, flattened_level: str) -> int: - return flattened_level.count("<>") - - def 
count_enemies(self, flattened_level: str) -> int: - return flattened_level.count("E") + flattened_level.count("B") - - def count_blocks(self, flattened_level: str) -> int: - return np.sum([flattened_level.count(char) for char in ["X", "S", "?", "Q"]]) - - def _flatten_level(self, string_level: List[str]) -> str: - return "".join(string_level) - - def pipe_prompt(self, flattened_level: str, level: str) -> str: - count = self.count_pipes(flattened_level) - keyword = f"{count}" - if not self.use_raw_counts: - thresholds, keywords = self.pipe_thresholds - threshold = np.digitize(count, thresholds, right=True) - keyword = keywords[threshold] - return f"{keyword} pipes", keyword - - def enemy_prompt(self, flattened_level: str, level: str) -> str: - count = self.count_enemies(flattened_level) - keyword = f"{count}" - if not self.use_raw_counts: - thresholds, keywords = self.enemy_thresholds - threshold = np.digitize(count, thresholds, right=True) - keyword = keywords[threshold] - return f"{keyword} enemies", keyword - - def block_prompt(self, flattened_level: str, level: str) -> str: - count = self.count_blocks(flattened_level) - keyword = f"{count}" - if not self.use_raw_counts: - thresholds, keywords = self.block_thresholds - threshold = np.digitize(count, thresholds, right=True) - keyword = keywords[threshold] - return f"{keyword} blocks", keyword - - def elevation_prompt(self, flattened_level: str, level: str): - top_levels = level[:6] # elevation 8 and up - for t in top_levels: - if "X" in t or "<" in t or ">" in t: - return "high elevation", "high" - return "low elevation", "low" - - def output_hidden(self, prompt: str, device: torch.device = torch.device("cpu")): - # Reducing along the first dimension to get a 768 dimensional array - return ( - self.feature_extraction(prompt, return_tensors="pt")[0] - .mean(0) - .to(device) - .view(1, -1) - ) - - def dataset_statistics(self, dataset: MarioDataset): - enemy_counts = [] - pipe_counts = [] - block_counts = [] - for i in range(len(dataset)): - level, _ = dataset[i] - str_level = self._flatten_level(view_level(level, dataset.tokenizer)) - - enemy_count = self.count_enemies(str_level) - pipe_count = self.count_pipes(str_level) - block_count = self.count_blocks(str_level) - - enemy_counts.append(enemy_count) - pipe_counts.append(pipe_count) - block_counts.append(block_count) - d = {"enemy": {}, "pipe": {}, "block": {}} - - d["enemy"] = stats.mstats.mquantiles(enemy_counts, [0.33, 0.66, 0.95]) - d["pipe"] = stats.mstats.mquantiles(pipe_counts, [0.33, 0.66, 0.95]) - d["block"] = stats.mstats.mquantiles(block_counts, [0.33, 0.66, 0.95]) - return d - - def __call__( - self, level: torch.Tensor = None, sample_prompt: bool = False - ) -> Union[str, torch.Tensor]: - device: torch.device = torch.device("cpu") - if not sample_prompt: - if level is None: - raise ValueError("Level must be provided if sample_prompt is not true!") - str_level = view_level(level, self.level_tokenizer) - flattened_level = self._flatten_level(str_level) - - pipe_prompt, _ = self.pipe_prompt(flattened_level, str_level) - enemy_prompt, _ = self.enemy_prompt(flattened_level, str_level) - block_prompt, _ = self.block_prompt(flattened_level, str_level) - elevation_prompt, _ = self.elevation_prompt(flattened_level, str_level) - device = level.device - else: - str_level = None - pipe_prompt = random.choice(["no", "little", "some", "many"]) + " pipes" - enemy_prompt = random.choice(["no", "little", "some", "many"]) + " enemies" - block_prompt = ( - random.choice(["little", "little", 
"some", "many"]) + " blocks" - ) # levels always have blocks - elevation_prompt = ( - random.choice(["low", "high"]) + " elevation" - ) # levels always have blocks - - prompt_dict = { - "pipe": pipe_prompt, - "enemy": enemy_prompt, - "block": block_prompt, - "elevation_prompt": elevation_prompt, - } - prompt = f"{pipe_prompt}, {enemy_prompt}, {block_prompt}, {elevation_prompt}" - hidden = self.output_hidden(prompt, device=device) - return prompt, hidden, prompt_dict, str_level diff --git a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/colab_util.py b/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/colab_util.py deleted file mode 100644 index 608227b228647e7b1bc16676fadf22d68e381f57..0000000000000000000000000000000000000000 --- a/spaces/nasttam/Image-and-3D-Model-Creator/PIFu/lib/colab_util.py +++ /dev/null @@ -1,114 +0,0 @@ -import io -import os -import torch -from skimage.io import imread -import numpy as np -import cv2 -from tqdm import tqdm_notebook as tqdm -import base64 -from IPython.display import HTML - -# Util function for loading meshes -from pytorch3d.io import load_objs_as_meshes - -from IPython.display import HTML -from base64 import b64encode - -# Data structures and functions for rendering -from pytorch3d.structures import Meshes -from pytorch3d.renderer import ( - look_at_view_transform, - OpenGLOrthographicCameras, - PointLights, - DirectionalLights, - Materials, - RasterizationSettings, - MeshRenderer, - MeshRasterizer, - SoftPhongShader, - HardPhongShader, - TexturesVertex -) - -def set_renderer(): - # Setup - device = torch.device("cuda:0") - torch.cuda.set_device(device) - - # Initialize an OpenGL perspective camera. - R, T = look_at_view_transform(2.0, 0, 180) - cameras = OpenGLOrthographicCameras(device=device, R=R, T=T) - - raster_settings = RasterizationSettings( - image_size=512, - blur_radius=0.0, - faces_per_pixel=1, - bin_size = None, - max_faces_per_bin = None - ) - - lights = PointLights(device=device, location=((2.0, 2.0, 2.0),)) - - renderer = MeshRenderer( - rasterizer=MeshRasterizer( - cameras=cameras, - raster_settings=raster_settings - ), - shader=HardPhongShader( - device=device, - cameras=cameras, - lights=lights - ) - ) - return renderer - -def get_verts_rgb_colors(obj_path): - rgb_colors = [] - - f = open(obj_path) - lines = f.readlines() - for line in lines: - ls = line.split(' ') - if len(ls) == 7: - rgb_colors.append(ls[-3:]) - - return np.array(rgb_colors, dtype='float32')[None, :, :] - -def generate_video_from_obj(obj_path, video_path, renderer): - # Setup - device = torch.device("cuda:0") - torch.cuda.set_device(device) - - # Load obj file - verts_rgb_colors = get_verts_rgb_colors(obj_path) - verts_rgb_colors = torch.from_numpy(verts_rgb_colors).to(device) - textures = TexturesVertex(verts_features=verts_rgb_colors) - wo_textures = TexturesVertex(verts_features=torch.ones_like(verts_rgb_colors)*0.75) - - # Load obj - mesh = load_objs_as_meshes([obj_path], device=device) - - # Set mesh - vers = mesh._verts_list - faces = mesh._faces_list - mesh_w_tex = Meshes(vers, faces, textures) - mesh_wo_tex = Meshes(vers, faces, wo_textures) - - # create VideoWriter - fourcc = cv2. 
VideoWriter_fourcc(*'MP4V') - out = cv2.VideoWriter(video_path, fourcc, 20.0, (1024,512)) - - for i in tqdm(range(90)): - R, T = look_at_view_transform(1.8, 0, i*4, device=device) - images_w_tex = renderer(mesh_w_tex, R=R, T=T) - images_w_tex = np.clip(images_w_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255 - images_wo_tex = renderer(mesh_wo_tex, R=R, T=T) - images_wo_tex = np.clip(images_wo_tex[0, ..., :3].cpu().numpy(), 0.0, 1.0)[:, :, ::-1] * 255 - image = np.concatenate([images_w_tex, images_wo_tex], axis=1) - out.write(image.astype('uint8')) - out.release() - -def video(path): - mp4 = open(path,'rb').read() - data_url = "data:video/mp4;base64," + b64encode(mp4).decode() - return HTML('' % data_url) diff --git a/spaces/nateraw/stable_diffusion_gallery/README.md b/spaces/nateraw/stable_diffusion_gallery/README.md deleted file mode 100644 index 4252d723c32211699899fbf04113dd6a62d9be5f..0000000000000000000000000000000000000000 --- a/spaces/nateraw/stable_diffusion_gallery/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion Gallery -emoji: 💻 -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.4.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nateraw/yolov6/yolov6/utils/checkpoint.py b/spaces/nateraw/yolov6/yolov6/utils/checkpoint.py deleted file mode 100644 index 686d6a7109d871718ff03378b3b74715f6f46d17..0000000000000000000000000000000000000000 --- a/spaces/nateraw/yolov6/yolov6/utils/checkpoint.py +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -import os -import shutil -import torch -import os.path as osp -from yolov6.utils.events import LOGGER -from yolov6.utils.torch_utils import fuse_model - - -def load_state_dict(weights, model, map_location=None): - """Load weights from checkpoint file, only assign weights those layers' name and shape are match.""" - ckpt = torch.load(weights, map_location=map_location) - state_dict = ckpt['model'].float().state_dict() - model_state_dict = model.state_dict() - state_dict = {k: v for k, v in state_dict.items() if k in model_state_dict and v.shape == model_state_dict[k].shape} - model.load_state_dict(state_dict, strict=False) - del ckpt, state_dict, model_state_dict - return model - - -def load_checkpoint(weights, map_location=None, inplace=True, fuse=True): - """Load model from checkpoint file.""" - LOGGER.info("Loading checkpoint from {}".format(weights)) - ckpt = torch.load(weights, map_location=map_location) # load - model = ckpt['ema' if ckpt.get('ema') else 'model'].float() - if fuse: - LOGGER.info("\nFusing model...") - model = fuse_model(model).eval() - else: - model = model.eval() - return model - - -def save_checkpoint(ckpt, is_best, save_dir, model_name=""): - """ Save checkpoint to the disk.""" - if not osp.exists(save_dir): - os.makedirs(save_dir) - filename = osp.join(save_dir, model_name + '.pt') - torch.save(ckpt, filename) - if is_best: - best_filename = osp.join(save_dir, 'best_ckpt.pt') - shutil.copyfile(filename, best_filename) - - -def strip_optimizer(ckpt_dir, epoch): - for s in ['best', 'last']: - ckpt_path = osp.join(ckpt_dir, '{}_ckpt.pt'.format(s)) - if not osp.exists(ckpt_path): - continue - ckpt = torch.load(ckpt_path, map_location=torch.device('cpu')) - if ckpt.get('ema'): - ckpt['model'] = ckpt['ema'] # replace model with ema - for k in ['optimizer', 'ema', 'updates']: # keys - ckpt[k] = None - ckpt['epoch'] = epoch - 
ckpt['model'].half() # to FP16 - for p in ckpt['model'].parameters(): - p.requires_grad = False - torch.save(ckpt, ckpt_path) diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ledeno Doba 1 Sinkronizirano Na Hrvatski Torrent Download !LINK!.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ledeno Doba 1 Sinkronizirano Na Hrvatski Torrent Download !LINK!.md deleted file mode 100644 index 894bc5c028cd92277170b83c7a86b19a4c86ae38..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Ledeno Doba 1 Sinkronizirano Na Hrvatski Torrent Download !LINK!.md +++ /dev/null @@ -1,50 +0,0 @@ - -Here is a possible title and article with SEO optimization and HTML formatting for the keyword "Ledeno Doba 1 Sinkronizirano Na Hrvatski Torrent Download": - -

      Ledeno Doba 1: Kako Preuzeti i Gledati Ovaj Animirani Klasik na Hrvatskom Jeziku

      - -

      Ledeno doba 1 je prvi dio popularne franšize o avanturama simpatičnih životinja u doba ledenjaka. Ovaj crtani film je izašao 2002. godine i osvojio je publiku i kritiku svojim humorom, akcijom i emocijama. Ako ste ljubitelj ovog filma i želite ga preuzeti i gledati na hrvatskom jeziku, evo nekoliko savjeta kako to učiniti.

      -

      Ledeno Doba 1 Sinkronizirano Na Hrvatski Torrent Download


      Download Filehttps://urlcod.com/2uIc0n



      - -

      Korak 1: Pronaći pouzdan izvor za preuzimanje torrenta

      - -

      Torrent je datoteka koja sadrži informacije o lokaciji i sadržaju drugih datoteka koje se mogu preuzeti putem peer-to-peer mreže. To znači da možete preuzeti film od drugih korisnika koji ga dijele na internetu. Međutim, trebate biti oprezni jer neki torrenti mogu biti lažni, zaraženi virusima ili kršiti autorska prava.

      - -

      Da biste pronašli pouzdan izvor za preuzimanje torrenta, možete koristiti neke od sljedećih web stranica:

      - -
        -
      • Balkandownload.org: Ova web stranica nudi veliki izbor sinhroniziranih crtanih filmova na srpskom, hrvatskom i slovenskom jeziku, uključujući i Ledeno doba 1. Samo trebate registrirati besplatan račun i zahvaliti se autoru posta da biste vidjeli skrivene linkove za preuzimanje torrenta.
      • -
      • Crtanifilmovielena.com: Ova web stranica je specijalizirana za crtane filmove na hrvatskom jeziku. Možete pronaći Ledeno doba 1 u titlovanoj verziji, kao i druge dijelove franÅ¡ize. Samo trebate kliknuti na link za preuzimanje torrenta koji se nalazi ispod opisa filma.
      • -
      • Docker Hub: Ovo je platforma za dijeljenje i upravljanje softverom u kontejnerima. Možete pronaći Ledeno doba 1 sinkronizirano na hrvatski u obliku Docker slike koju možete pokrenuti na svom računalu. Samo trebate slijediti upute na web stranici kako biste preuzeli i pokrenuli sliku.
      • -
      - -

      Korak 2: Preuzeti i instalirati torrent klijent

      - -

      Torrent klijent je program koji vam omogućuje preuzimanje i dijeljenje datoteka putem torrent protokola. Postoji mnogo različitih torrent klijenata koje možete koristiti, ali neki od najpopularnijih su:

      - -
        -
      • Titlovi.com ili Opensubtitles.org.

        - -

        Zaključak

        - -

        Ledeno doba 1 je zabavan i poučan crtani film koji će vas nasmijati i raznježiti. Ako želite preuzeti i gledati ovaj film na hrvatskom jeziku, samo trebate slijediti ove jednostavne korake:

        - -
          -
        1. Pronaći pouzdan izvor za preuzimanje torrenta za Ledeno doba 1 sinkronizirano na hrvatski.
        2. -
        3. Preuzeti i instalirati torrent klijent koji vam omogućuje preuzimanje i dijeljenje datoteka putem torrent protokola.
        4. -
        5. Otvoriti torrent datoteku i pokrenuti preuzimanje filma.
        6. -
        7. Pregledati film u svom medijskom playeru i dodati titlove ili sinkronizaciju po potrebi.
        8. -
        - -

        Uživajte u gledanju Ledenog doba 1 na hrvatskom jeziku i ne zaboravite podijeliti svoje dojmove s nama u komentarima!

        7196e7f11a
        -
        -
        \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Nomad Factory Liquid Bundle V1.6 TALiO. Setup Free UPDATED.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Nomad Factory Liquid Bundle V1.6 TALiO. Setup Free UPDATED.md deleted file mode 100644 index 7a6f7fd07eb833943f4d5d66827632a582da3234..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Nomad Factory Liquid Bundle V1.6 TALiO. Setup Free UPDATED.md +++ /dev/null @@ -1,142 +0,0 @@ - -

        Nomad Factory Liquid Bundle v1.6 TALiO: A Review of the Ultimate Audio Processing Plug-ins

        -

        If you are looking for a comprehensive set of audio processing plug-ins that can handle any kind of sound design, mixing, mastering, or creative effects, you might want to check out Nomad Factory Liquid Bundle v1.6 TALiO. This bundle contains six high-quality plug-ins that cover all the essential aspects of digital signal processing: compression, delay, gate, modulation, phase, and reverb. And the best part is, you can get it for free!

        -

        Nomad Factory Liquid Bundle v1.6 TALiO. setup free


        DOWNLOADhttps://urlcod.com/2uI9JY



        -

        In this article, we will give you an in-depth review of each plug-in in the bundle, compare it with other similar products on the market, and show you how to install and use it in your own projects. By the end of this article, you will have a clear idea of what Nomad Factory Liquid Bundle v1.6 TALiO can do for you and how to get it for free.

        -

        A detailed review of each plug-in in the bundle

        -

        Nomad Factory Liquid Bundle v1.6 TALiO consists of six plug-ins that are designed to work together or separately, depending on your needs. Each plug-in has its own unique features and parameters that allow you to shape and enhance your sound in various ways. Here is a brief overview of each plug-in:

        -

        Liquid Compressor II

        -

        Liquid Compressor II is a versatile compressor that can be used for both subtle and extreme compression effects. It has a simple interface that lets you adjust the threshold, ratio, attack, release, output gain, and makeup gain. It also has a sidechain input that lets you trigger the compression from another source, such as a kick drum or a vocal track. You can also choose from four different compression modes: soft knee, hard knee, vintage, and brick wall.

        -

        Liquid Compressor II is great for controlling the dynamics of your tracks, adding punch and presence, or creating pumping and ducking effects. You can use it on individual tracks or on buses and subgroups.

        -

        Liquid Delays II

        -

        Liquid Delays II is a powerful delay plug-in that can create everything from simple echoes to complex rhythmic patterns. It has two independent delay lines that can be synced to your host tempo or set manually in milliseconds. You can also adjust the feedback, pan, mix, and modulation depth and rate for each delay line. Additionally, you can apply filters, distortion, saturation, and bit reduction to each delay line for more creative possibilities.

        -

        Liquid Delays II is perfect for adding depth, width, movement, and texture to your sounds. You can use it on vocals, guitars , synths, drums, or any other instrument. You can also create interesting rhythmic effects by using different delay times and feedback levels for each delay line.

        -

        Liquid Gate II

        -

        Liquid Gate II is a flexible gate plug-in that can be used to remove unwanted noise, silence, or bleed from your tracks. It has a simple interface that lets you adjust the threshold, range, attack, hold, and release. It also has a sidechain input that lets you trigger the gate from another source, such as a snare drum or a hi-hat. You can also choose from four different gate modes: normal, duck, expand, and invert.

        -

        Liquid Gate II is useful for cleaning up your tracks, enhancing the transients, or creating gated reverb effects. You can use it on vocals, guitars, drums, or any other instrument that needs some gating.

        -

        Liquid Mod II

        -

        Liquid Mod II is a versatile modulation plug-in that can create a wide range of effects, such as chorus, flanger, phaser, tremolo, vibrato, and more. It has two independent modulation sources that can be synced to your host tempo or set manually in hertz. You can also adjust the depth, rate, phase, shape, and feedback for each modulation source. Additionally, you can apply filters and distortion to the modulated signal for more sonic possibilities.

        -

        -

        Liquid Mod II is ideal for adding movement, warmth, richness, and character to your sounds. You can use it on vocals, guitars , synths, drums, or any other instrument that needs some modulation.

        -

        Liquid Phase II

        -

        Liquid Phase II is a unique phase plug-in that can create subtle or extreme phase shifting effects. It has two independent phase shifters that can be synced to your host tempo or set manually in degrees. You can also adjust the depth, rate, feedback, and mix for each phase shifter. Additionally, you can apply filters and distortion to the phase shifted signal for more harmonic possibilities.

        -

        Liquid Phase II is great for adding color, movement, and dimension to your sounds. You can use it on vocals, guitars, synths, drums, or any other instrument that needs some phase shifting.

        -

        Liquid Verb II

        -

        Liquid Verb II is a lush reverb plug-in that can create realistic or surreal reverb effects. It has a simple interface that lets you adjust the size, decay, damping, diffusion, pre-delay, and mix of the reverb. It also has a modulation section that lets you add some movement and variation to the reverb tail. You can also choose from four different reverb modes: hall, room, plate, and spring.

        -

        Liquid Verb II is excellent for adding space, depth, and ambience to your sounds. You can use it on vocals, guitars, synths, drums, or any other instrument that needs some reverb.

        -

        A comparison of Nomad Factory Liquid Bundle v1.6 TALiO with other similar products

        -

        Nomad Factory Liquid Bundle v1.6 TALiO is not the only audio processing bundle on the market. There are many other products that offer similar or different features and benefits. Here are some of the most popular ones and how they compare with Nomad Factory Liquid Bundle v1.6 TALiO:

        - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        ProductPriceNumber of plug-insMain featuresProsCons
        Nomad Factory Liquid Bundle v1.6 TALiOFree6Compression, delay, gate, modulation, phase, and reverb plug-ins with filters, distortion, saturation, bit reduction, and modulation options.- High-quality sound
        - Easy to use interface
        - Flexible and creative possibilities
        - Free to download
        - No EQ plug-in
        - No presets included
        - Requires authorization
        Waves Gold Bundle$199 (regular price $799)42A comprehensive collection of audio processing plug-ins that cover EQ, compression, reverb, delay, modulation, pitch correction, noise reduction, restoration, mastering, and more.- Industry-standard quality
        - Wide range of plug-ins
        - Presets from famous engineers
        - Lifetime updates
        - Expensive (even with discount)
        - Complex interface
        - High CPU usage
        - Requires iLok
        iZotope Ozone Elements$29 (regular price $129)1 (mastering suite)A simplified version of iZotope's flagship mastering software that includes EQ, compression , and maximizer modules with intelligent processing and presets.- Professional sound
        - Easy to use interface
        - Intelligent processing and presets
        - Affordable price
        - Limited features
        - No individual plug-ins
        - No modulation or creative effects
        - Requires authorization
        FabFilter Pro Bundle$89916A premium collection of audio processing plug-ins that cover EQ, compression, reverb, delay, distortion, saturation, multiband dynamics, limiter, gate, de-esser, and more.- High-end quality
        - Innovative and flexible interface
        - Advanced features and options
        - Presets and help files
        - Very expensive
        - Steep learning curve
        - Overkill for some users
        - Requires activation
        -

        As you can see, Nomad Factory Liquid Bundle v1.6 TALiO has its own advantages and disadvantages compared to other products. It may not have as many plug-ins or features as some of the more expensive bundles, but it offers a great balance of quality, simplicity, flexibility, and creativity. And the fact that you can get it for free makes it even more appealing.

        -

        A tutorial on how to install and use Nomad Factory Liquid Bundle v1.6 TALiO

        -

        Now that you have learned about the features and benefits of Nomad Factory Liquid Bundle v1.6 TALiO, you might be wondering how to get it and use it in your own projects. Don't worry, we have got you covered. Here are the steps you need to follow to install and use Nomad Factory Liquid Bundle v1.6 TALiO:

        -

        System requirements and compatibility

        -

        Before you download and install Nomad Factory Liquid Bundle v1.6 TALiO, you need to make sure that your system meets the minimum requirements and that your DAW or host application is compatible with the plug-ins. Here are the system requirements and compatibility information for Nomad Factory Liquid Bundle v1.6 TALiO:

        -
          -
        • Windows: Windows XP/Vista/7/8/10 (32-bit or 64-bit)
        • -
        • Mac: Mac OS X 10.4 or higher (32-bit or 64-bit)
        • -
        • CPU: Intel Pentium 4 or AMD Athlon XP or higher (recommended: Intel Core 2 Duo or AMD Athlon 64 X2 or higher)
        • -
        • RAM: 512 MB or higher (recommended: 1 GB or higher)
        • -
        • Hard disk space: 100 MB or higher
        • -
        • Screen resolution: 1024 x 768 or higher
        • -
        • Audio interface: ASIO (Windows) or Core Audio (Mac) compatible
        • -
        • DAW or host application: VST, AU, RTAS, or AAX compatible (32-bit or 64-bit)
        • -
        -

        If your system meets these requirements and your DAW or host application supports these formats, you are good to go.

        -

        Download and installation steps

        -

        The next step is to download and install Nomad Factory Liquid Bundle v1.6 TALiO on your system. Here are the steps you need to follow:

        -
          -
        1. Go to this link: https://audioz.download/software/win/10688-download_nomad-factory-liquid-bundle-v16-talio.html
        2. -
        3. Click on the "Download" button and choose a mirror site from the list.
        4. -
        5. Wait for the download to finish and extract the ZIP file to a folder on your hard drive.
        6. -
        7. Open the folder and run the setup.exe file (Windows) or the setup.dmg file (Mac).
        8. -
        9. Follow the instructions on the screen and choose the destination folder and the plug-in formats you want to install.
        10. -
        11. Wait for the installation to complete and close the setup wizard.
        12. -
        -

        Congratulations, you have successfully installed Nomad Factory Liquid Bundle v1.6 TALiO on your system.

        -

        Activation and authorization process

        -

        The last step before you can use Nomad Factory Liquid Bundle v1.6 TALiO is to activate and authorize the plug-ins. Here are the steps you need to follow:

        -
          -
        1. Open your DAW or host application and load one of the plug-ins from Nomad Factory Liquid Bundle v1.6 TALiO.
        2. -
        3. You will see a pop-up window asking you to enter your serial number.
        4. -
        5. Go back to the folder where you extracted the ZIP file and open the file named "Serial.txt".
        6. -
        7. Copy the serial number and paste it into the pop-up window.
        8. -
        9. Click on the "Authorize" button and wait for the confirmation message.
        10. -
        11. Repeat this process for each plug-in in Nomad Factory Liquid Bundle v1.6 TALiO.
        12. -
        -

        That's it, you have successfully activated and authorized Nomad Factory Liquid Bundle v1.6 TALiO on your system.

        -

        How to use the plug-ins in your DAW or standalone mode

        -

        Now that you have installed, activated, and authorized Nomad Factory Liquid Bundle v1.6 TALiO, you are ready to use it in your own projects. Here are some tips on how to use the plug-ins in your DAW or standalone mode:

        -
          -
        • To use the plug-ins in your DAW, simply load them as inserts or sends on your tracks, buses, or master channel. You can also use them as effects on audio clips or regions.
        • -
        • To use the plug-ins in standalone mode, run the application named "Nomad Factory Liquid Bundle v1.6 TALiO.exe" (Windows) or "Nomad Factory Liquid Bundle v1.6 TALiO.app" (Mac) from the folder where you installed the plug-ins. You can then load audio files or record audio from your audio interface and apply the plug-ins as effects.
        • -
        • To adjust the parameters of the plug-ins, use your mouse to drag the knobs, sliders, buttons, or switches. You can also use your keyboard to enter values or use your MIDI controller to assign and automate parameters.
        • -
        • To access the menus and options of the plug-ins, right-click on the interface or click on the Nomad Factory logo. You can then access features such as bypass, undo, redo, copy, paste, reset, compare, help, and about.
        • -
        • To save and load your own presets with the plug-ins, click on the preset menu at the top of the interface. You can then browse, select, rename, delete, save, and load presets from your hard drive or from the factory presets included with the plug-ins.
        • -
        -

        These are some of the basic ways to use Nomad Factory Liquid Bundle v1.6 TALiO in your own projects. Of course, you can experiment with different settings and combinations of plug-ins to achieve different results and effects.

        -

        Conclusion

        -

        Nomad Factory Liquid Bundle v1.6 TALiO is a fantastic audio processing bundle that offers six high-quality plug-ins that cover all the essential aspects of digital signal processing: compression, delay, gate, modulation, phase, and reverb. Each plug-in has its own unique features and parameters that allow you to shape and enhance your sound in various ways. You can also apply filters, distortion , saturation, bit reduction, and modulation to each plug-in for more creative possibilities. And the best part is, you can get it for free! Nomad Factory Liquid Bundle v1.6 TALiO is not the only audio processing bundle on the market, but it offers a great balance of quality, simplicity, flexibility, and creativity. It may not have as many plug-ins or features as some of the more expensive bundles, but it has everything you need to create professional and amazing sounds. In this article, we have given you an in-depth review of each plug-in in the bundle, compared it with other similar products on the market, and showed you how to install and use it in your own projects. We hope that this article has been helpful and informative for you. If you are interested in trying out Nomad Factory Liquid Bundle v1.6 TALiO for yourself, you can download it for free from this link: https://audioz.download/software/win/10688-download_nomad-factory-liquid-bundle-v16-talio.html. You will need to enter a serial number that is included in the ZIP file to activate and authorize the plug-ins. You can then use them in your DAW or standalone mode and enjoy the amazing sound quality and versatility of Nomad Factory Liquid Bundle v1.6 TALiO. Don't miss this opportunity to get one of the best audio processing bundles for free. Download Nomad Factory Liquid Bundle v1.6 TALiO today and unleash your creativity and potential with these awesome plug-ins.

        -

        FAQs

        -

        Here are some of the most frequently asked questions about Nomad Factory Liquid Bundle v1.6 TALiO and their answers:

        -

        What is digital signal processing and why is it important for audio production?

        -

        Digital signal processing (DSP) is the process of manipulating digital audio signals using mathematical algorithms and techniques. DSP can be used to perform various tasks, such as filtering, equalizing, compressing, delaying, modulating, phasing, reverberating, and more. DSP is important for audio production because it allows you to shape and enhance your sound in various ways, depending on your needs and preferences.

        -

        What is sound design and how can Nomad Factory Liquid Bundle v1.6 TALiO help you with it?

        -

        Sound design is the art and science of creating and manipulating sound for various purposes, such as music, film, video games, theater, or multimedia. Sound design can involve recording, synthesizing, sampling, editing, mixing, mastering, or processing sound using various tools and techniques. Nomad Factory Liquid Bundle v1.6 TALiO can help you with sound design by providing you with a comprehensive set of plug-ins that can handle any kind of sound design task. You can use them to create realistic or surreal sounds, add depth or width to your sounds , or create interesting effects and textures with your sounds.

        -

        How can you customize and save your own presets with Nomad Factory Liquid Bundle v1.6 TALiO?

        -

        Nomad Factory Liquid Bundle v1.6 TALiO allows you to customize and save your own presets with each plug-in. To do so, you need to follow these steps:

        -
          -
        1. Adjust the parameters of the plug-in to your liking.
        2. -
        3. Click on the preset menu at the top of the interface and select "Save As".
        4. -
        5. Enter a name for your preset and click "OK".
        6. -
        7. Your preset will be saved in the folder where you installed the plug-ins, under the subfolder "Presets".
        8. -
        9. To load your preset, click on the preset menu again and select your preset from the list.
        10. -
        -

        You can also browse, rename, delete, copy, and paste presets from the preset menu.

        -

        How can you get technical support and updates for Nomad Factory Liquid Bundle v1.6 TALiO?

        -

        If you need technical support or updates for Nomad Factory Liquid Bundle v1.6 TALiO, you can contact Nomad Factory through their website: https://www.nomadfactory.com/. You can also check their FAQ section, user manual, and online forum for more information and help.

        -

        Is Nomad Factory Liquid Bundle v1.6 TALiO legal and safe to download?

        -

        Nomad Factory Liquid Bundle v1.6 TALiO is a legal and safe product that is offered for free by Nomad Factory as a promotion. However, you should always download it from a trusted source, such as the link we provided in this article: https://audioz.download/software/win/10688-download_nomad-factory-liquid-bundle-v16-talio.html. You should also scan the ZIP file with an antivirus software before extracting it to avoid any potential malware or viruses.

        b2dd77e56b
        -
        -
        \ No newline at end of file diff --git a/spaces/nev/dalle-6D/app.py b/spaces/nev/dalle-6D/app.py deleted file mode 100644 index 7e96e775fa882cf42ad19de735187887138f8f81..0000000000000000000000000000000000000000 --- a/spaces/nev/dalle-6D/app.py +++ /dev/null @@ -1,141 +0,0 @@ -import os -os.environ["PYOPENGL_PLATFORM"] = "egl" -from tqdm.auto import trange -from PIL import Image -import gradio as gr -import numpy as np -import pyrender -import trimesh -import scipy -import torch -import cv2 - - -class MidasDepth(object): - def __init__(self, model_type="DPT_Large", device=torch.device("cuda" if torch.cuda.is_available() else "cpu")): - self.device = device - self.midas = torch.hub.load("intel-isl/MiDaS", model_type).to(self.device).eval().requires_grad_(False) - self.transform = torch.hub.load("intel-isl/MiDaS", "transforms").dpt_transform - - def get_depth(self, image): - if not isinstance(image, np.ndarray): - image = np.asarray(image) - if (image > 1).any(): - image = image.astype("float64") / 255. - with torch.inference_mode(): - batch = self.transform(image[..., :3]).to(self.device) - prediction = self.midas(batch) - prediction = torch.nn.functional.interpolate( - prediction.unsqueeze(1), - size=image.shape[:2], - mode="bicubic", - align_corners=False, - ).squeeze() - return prediction.detach().cpu().numpy() - - -def process_depth(dep): - depth = dep.copy() - depth -= depth.min() - depth /= depth.max() - depth = 1 / np.clip(depth, 0.2, 1) - blurred = cv2.medianBlur(depth, 5) # 9 not available because it requires 8-bit - maxd = cv2.dilate(blurred, np.ones((3, 3))) - mind = cv2.erode(blurred, np.ones((3, 3))) - edges = maxd - mind - threshold = .05 # Better to have false positives - pick_edges = edges > threshold - return depth, pick_edges - - -def make_mesh(pic, depth, pick_edges): - faces = [] - im = np.asarray(pic) - grid = np.mgrid[0:im.shape[0], 0:im.shape[1]].transpose(1, 2, 0 - ).reshape(-1, 2)[..., ::-1] - flat_grid = grid[:, 1] * im.shape[1] + grid[:, 0] - positions = np.concatenate(((grid - np.array(im.shape[:-1])[np.newaxis, :] - / 2) / im.shape[1] * 2, - depth.flatten()[flat_grid][..., np.newaxis]), - axis=-1) - positions[:, :-1] *= positions[:, -1:] - positions[:, 1] *= -1 - colors = im.reshape(-1, 3)[flat_grid] - - c = lambda x, y: y * im.shape[1] + x - for y in trange(im.shape[0]): - for x in range(im.shape[1]): - if pick_edges[y, x]: - continue - if x > 0 and y > 0: - faces.append([c(x, y), c(x, y - 1), c(x - 1, y)]) - if x < im.shape[1] - 1 and y < im.shape[0] - 1: - faces.append([c(x, y), c(x, y + 1), c(x + 1, y)]) - face_colors = np.asarray([colors[i[0]] for i in faces]) - - tri_mesh = trimesh.Trimesh(vertices=positions * np.array([1.0, 1.0, -1.0]), - faces=faces, - face_colors=np.concatenate((face_colors, - face_colors[..., -1:] - * 0 + 255), - axis=-1).reshape(-1, 4), - smooth=False, - ) - - return tri_mesh - - -def args_to_mat(tx, ty, tz, rx, ry, rz): - mat = np.eye(4) - mat[:3, :3] = scipy.spatial.transform.Rotation.from_euler("XYZ", (rx, ry, rz)).as_matrix() - mat[:3, 3] = tx, ty, tz - return mat - - -def render(mesh, mat): - mesh = pyrender.mesh.Mesh.from_trimesh(mesh, smooth=False) - scene = pyrender.Scene(ambient_light=np.array([1.0, 1.0, 1.0])) - camera = pyrender.PerspectiveCamera(yfov=np.pi / 2, aspectRatio=1.0) - scene.add(camera, pose=mat) - scene.add(mesh) - r = pyrender.OffscreenRenderer(1024, 1024) - rgb, d = r.render(scene, pyrender.constants.RenderFlags.FLAT) - mask = d == 0 - rgb = rgb.copy() - rgb[mask] = 0 - res = 
Image.fromarray(np.concatenate((rgb, ((mask[..., np.newaxis]) == 0).astype(np.uint8) * 255), axis=-1)) - return res - - -def main(): - from pyvirtualdisplay import Display - disp = Display() - disp.start() - - midas = MidasDepth() - def fn(pic, *args): - depth, pick_edges = process_depth(midas.get_depth(pic)) - mesh = make_mesh(pic, depth, pick_edges) - frame = render(mesh, args_to_mat(*args)) - return np.asarray(frame), (255 / np.asarray(depth)).astype(np.uint8), None - - interface = gr.Interface(fn=fn, inputs=[ - gr.inputs.Image(label="src", type="numpy"), - gr.inputs.Number(label="tx", default=0.0), - gr.inputs.Number(label="ty", default=0.0), - gr.inputs.Number(label="tz", default=0.0), - gr.inputs.Number(label="rx", default=0.0), - gr.inputs.Number(label="ry", default=0.0), - gr.inputs.Number(label="rz", default=0.0) - ], outputs=[ - gr.outputs.Image(type="numpy", label="result"), - gr.outputs.Image(type="numpy", label="depth"), - gr.outputs.Video(label="interpolated") - ], title="DALL·E 6D", description="Lift DALL·E 2 (or any other model) into 3D!") - gr.TabbedInterface([interface], ["Warp 3D images"]).launch() - - disp.stop() - - -if __name__ == '__main__': - main() diff --git a/spaces/nota-ai/compressed-stable-diffusion/app.py b/spaces/nota-ai/compressed-stable-diffusion/app.py deleted file mode 100644 index 6f57fc38b173d96761b07fffea6bfdad2dd9c57b..0000000000000000000000000000000000000000 --- a/spaces/nota-ai/compressed-stable-diffusion/app.py +++ /dev/null @@ -1,74 +0,0 @@ -import os -import subprocess -from pathlib import Path - -import gradio as gr -import torch - -from demo import SdmCompressionDemo - -if __name__ == "__main__": - device = 'cuda' if torch.cuda.is_available() else 'cpu' - servicer = SdmCompressionDemo(device) - example_list = servicer.get_example_list() - - with gr.Blocks(theme='nota-ai/theme') as demo: - gr.Markdown(Path('docs/header.md').read_text()) - gr.Markdown(Path('docs/description.md').read_text()) - with gr.Row(): - with gr.Column(variant='panel', scale=30): - - text = gr.Textbox(label="Input Prompt", max_lines=5, placeholder="Enter your prompt") - - with gr.Row().style(equal_height=True): - generate_original_button = gr.Button(value="Generate with Original Model", variant="primary") - generate_compressed_button = gr.Button(value="Generate with Compressed Model", variant="primary") - - with gr.Accordion("Advanced Settings", open=False): - negative = gr.Textbox(label=f'Negative Prompt', placeholder=f'Enter aspects to remove (e.g., {"low quality"})') - with gr.Row(): - guidance_scale = gr.Slider(label="Guidance Scale", value=7.5, minimum=4, maximum=11, step=0.5) - steps = gr.Slider(label="Denoising Steps", value=25, minimum=10, maximum=75, step=5) - seed = gr.Slider(0, 999999, label='Random Seed', value=1234, step=1) - - with gr.Tab("Example Prompts"): - examples = gr.Examples(examples=example_list, inputs=[text]) - - with gr.Column(variant='panel',scale=35): - # Define original model output components - gr.Markdown('

Original Stable Diffusion 1.4
        ') - original_model_output = gr.Image(label="Original Model") - with gr.Row().style(equal_height=True): - with gr.Column(): - original_model_test_time = gr.Textbox(value="", label="Inference Time (sec)") - original_model_params = gr.Textbox(value=servicer.get_sdm_params(servicer.pipe_original), label="# Parameters") - original_model_error = gr.Markdown() - - - with gr.Column(variant='panel',scale=35): - # Define compressed model output components - gr.Markdown('

Compressed Stable Diffusion (Ours)
        ') - compressed_model_output = gr.Image(label="Compressed Model") - with gr.Row().style(equal_height=True): - with gr.Column(): - compressed_model_test_time = gr.Textbox(value="", label="Inference Time (sec)") - compressed_model_params = gr.Textbox(value=servicer.get_sdm_params(servicer.pipe_compressed), label="# Parameters") - compressed_model_error = gr.Markdown() - - inputs = [text, negative, guidance_scale, steps, seed] - - # Click the generate button for original model - original_model_outputs = [original_model_output, original_model_error, original_model_test_time] - text.submit(servicer.infer_original_model, inputs=inputs, outputs=original_model_outputs) - generate_original_button.click(servicer.infer_original_model, inputs=inputs, outputs=original_model_outputs) - - # Click the generate button for compressed model - compressed_model_outputs = [compressed_model_output, compressed_model_error, compressed_model_test_time] - text.submit(servicer.infer_compressed_model, inputs=inputs, outputs=compressed_model_outputs) - generate_compressed_button.click(servicer.infer_compressed_model, inputs=inputs, outputs=compressed_model_outputs) - - gr.Markdown(Path('docs/footer.md').read_text()) - - demo.queue(concurrency_count=1) - # demo.launch() - demo.launch() diff --git a/spaces/nurrahmawati3/deployment-hck2/app.py b/spaces/nurrahmawati3/deployment-hck2/app.py deleted file mode 100644 index 6570c60e8772bd147df9c1b89dac9198bd808053..0000000000000000000000000000000000000000 --- a/spaces/nurrahmawati3/deployment-hck2/app.py +++ /dev/null @@ -1,60 +0,0 @@ -import streamlit as st -import pandas as pd -import joblib - -st.header('FTDS Model Deployment') -st.write(""" -Created by FTDS Curriculum Team - -Use the to select input features. -""") - -@st.cache - -def fetch_data(): - df = pd.read_csv('https://raw.githubusercontent.com/ardhiraka/PFDS_sources/master/campus.csv') - return df - -df = fetch_data() - -st.write(df) - -st.subheader('User Input Features') - -gender = st.selectbox('Gender', df['gender'].unique()) -ssc = st.number_input('Secondary School Points', value=67.00) -hsc = st.number_input('High School Points', 0.0, value=91.0) -hsc_s = st.selectbox('High School Spec', df['hsc_s'].unique()) -degree_p = st.number_input('Degree Points', 0.0, value=58.0) -degree_t = st.selectbox('Degree Spec', df['degree_t'].unique()) -workex = st.selectbox('Work Experience?', df['workex'].unique()) -etest_p = st.number_input('Etest Points', 0.0, value=78.00) -spec = st.selectbox('Specialization', df['specialisation'].unique()) -mba_p = st.number_input('MBA Points', 0.0, value=54.55) - -data = { - 'gender': gender, - 'ssc_p': ssc, - 'hsc_p': hsc, - 'hsc_s': hsc_s, - 'degree_p': degree_p, - 'degree_t': degree_t, - 'workex': workex, - 'etest_p': etest_p, - 'specialisation':spec, - 'mba_p': mba_p -} -input = pd.DataFrame(data, index=[0]) - -st.subheader('User Input') -st.write(input) - -load_model = joblib.load("my_model.pkl") - -if st.button("Predict"): - prediction = load_model.predict(input) - - prediction = 'Placed' if prediction==1 else 'Not Placed' - - st.write('Based on user input, the placement model predicted: ') - st.write(prediction) \ No newline at end of file diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/train.py b/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/train.py deleted file mode 100644 index 67468dbb90daed76b8af4addb496bd3631fd8958..0000000000000000000000000000000000000000 --- a/spaces/oguzakif/video-object-remover/FGT_codes/LAFC/train.py +++ /dev/null @@ -1,70 
+0,0 @@ -from utils.dist import * -from parse import * -from utils.util import find_free_port -import torch.multiprocessing as mp -import torch.distributed -from importlib import import_module - -from flow_inputs import args_parser - - -def main_worker(rank, opt): - if 'local_rank' not in opt: - opt['local_rank'] = opt['global_rank'] = rank - if opt['distributed']: - torch.cuda.set_device(int(opt['local_rank'])) - torch.distributed.init_process_group(backend='nccl', - init_method=opt['init_method'], - world_size=opt['world_size'], - rank=opt['global_rank'], - group_name='mtorch') - print('using GPU {}-{} for training'.format( - int(opt['global_rank']), int(opt['local_rank']))) - - if torch.cuda.is_available(): - opt['device'] = torch.device("cuda:{}".format(opt['local_rank'])) - else: - opt['device'] = 'cpu' - - pkg = import_module('networks.{}'.format(opt['network'])) - trainer = pkg.Network(opt, rank) - trainer.train() - - -def main(args_obj): - opt = parse(args_obj) - opt['world_size'] = get_world_size() - free_port = find_free_port() - master_ip = get_master_ip() - opt['init_method'] = "tcp://{}:{}".format(master_ip, free_port) - opt['distributed'] = True if opt['world_size'] > 1 else False - print(f'World size is: {opt["world_size"]}, and init_method is: {opt["init_method"]}') - print('Import network module: ', opt['network']) - - # dataset file names - if opt['gen_state'] != '': - opt['path']['gen_state'] = opt['gen_state'] - if opt['opt_state'] != '': - opt['path']['opt_state'] = opt['opt_state'] - - if args.finetune == 1: - opt['finetune'] = True - else: - opt['finetune'] = False - - print(f'model is: {opt["model"]}') - - if get_master_ip() == "127.0.0.1": - # localhost - mp.spawn(main_worker, nprocs=opt['world_size'], args=(opt,)) - else: - # multiple processes should be launched by openmpi - opt['local_rank'] = get_local_rank() - opt['global_rank'] = get_global_rank() - main_worker(-1, opt) - - -if __name__ == '__main__': - args = args_parser() - args_obj = vars(args) - main(args_obj) diff --git a/spaces/orangepony4/stabilityai-stable-diffusion-2-1/app.py b/spaces/orangepony4/stabilityai-stable-diffusion-2-1/app.py deleted file mode 100644 index 0160420876923d89f2ab5fccb9f4d13725e29972..0000000000000000000000000000000000000000 --- a/spaces/orangepony4/stabilityai-stable-diffusion-2-1/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/stabilityai/stable-diffusion-2-1").launch() \ No newline at end of file diff --git a/spaces/osanseviero/TheMLGame/interface.css b/spaces/osanseviero/TheMLGame/interface.css deleted file mode 100644 index 77e9927b3c6c057491ac548bc52bd6f870c1c26a..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/TheMLGame/interface.css +++ /dev/null @@ -1,790 +0,0 @@ -#btnMakeProbe { - margin-bottom: 2px; -} - -#btnAddProc { - width: 70px; - margin-bottom: 1px; -} - -#btnAddMem { - width: 70px; -} - -#btnEntertainSwarm { - margin-top: 4px; - margin-bottom: 4px; -} - -#btnSynchSwarm { - margin-top: 4px; - margin-bottom: 4px; -} - - -#qCompDisplay { - margin-left: 3px; - -} - -#swarmSliderDiv { - position: relative; - display: inline-block; - margin-bottom: 10px; -} - -#sliderLabelWork { - position: relative; - display: inline-block; -} - -#swarmSlider { - position: absolute; - display: inline-block; - width: 180px; - top: 0px; - margin-left: 2px; - margin-right: 4px; - -} - -#sliderLabelThink { - position: relative; - display: inline-block; - left: 190px; -} - -.slider { - width: 100%; - -} - -h2 { - line-height: 70%; 
-} - -.toolTip { - position: relative; - display: inline-block; -} - -.toolTip .toolTipText { - visibility: hidden; - width: 160px; - background-color: #c8c8c8; - color: #000; - text-align: center; - padding: 5px 0; - border-radius: 3px; - - position: absolute; - z-index: 1; - bottom: 75%; - left: 50%; - margin-left: -19px; - - opacity: 0; - transition: opacity 1s; -} - -.toolTip:hover .toolTipText { - visibility: visible; - opacity: 1; -} - -.toolTip2 { - margin-left: 2px; - position: relative; - display: inline-block; - height: 20px; - margin-top: 5px; - -} - -.toolTip2 .toolTipText2 { - visibility: hidden; - width: 200px; - background-color: #c8c8c8; - color: #000; - text-align: center; - padding: 5px 0; - border-radius: 3px; - - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - - position: absolute; - z-index: 1; - bottom: 125%; - left: 50%; - margin-left: -15px; - - opacity: 0; - transition: opacity 1s; -} - -.toolTip2:hover .toolTipText2 { - visibility: visible; - opacity: 1; -} - -.toolTip3 { - position: relative; - display: inline-block; - margin-bottom: 5px; -} - -.toolTip3 .toolTipText3 { - visibility: hidden; - width: 180px; - background-color: #c8c8c8; - color: #000; - text-align: center; - padding: 5px 0; - border-radius: 3px; - - position: absolute; - z-index: 1; - bottom: 27px; - left: 80px; - margin-left: 0px; - - opacity: 0; - transition: opacity 1s; -} - -.toolTip3:hover .toolTipText3 { - visibility: visible; - opacity: 1; -} - - -#cover { - position:fixed; - background-color: white; - top:0; - left:0; - height:100%; - width:100%; - z-index: 10; -} - -#tournamentStuff { -/* height: 58px; */ - -} - -#tournamentLabel { - margin-bottom: 5px; -} - -#wireBuyerDiv { - margin-bottom: 3px; -} - -#processorDisplay { - display: inline-block; - vertical-align: baseline; - height: 22px; -} - -#memoryDisplay { - display: inline-block; - vertical-align: baseline; - height: 22px; -} - -#harvesterMultiButtons { - margin-bottom: 2; -} - -#wireDroneMultiButtons { - margin-bottom: 2; -} - -#farmMultiButtons { - margin-bottom: 2; -} - -#batteryMultiButtons { - margin-bottom: 2; -} - -#autoTourneyStatusDiv { - float: left; - width: 30px; -} - -#tourneyButton { - float: left; - width: 120px; -} - -#autoTourneyControl { - float: left; - width: 78px; -} - -#victoryDiv { - margin-top: 10px; - width: 289px; - text-align: center; -} - -#increaseProbeTrustDiv { - float: left; - width: 130px; - margin-left: 5; - border: 0; -} - -#increaseMaxTrustDiv { - float: left; - width: 150px; - margin: 0; - border: 0; -} - -#combatButtonDiv { - margin-top: 0; -} - -#battleCanvasDiv { - position: relative; -} - -#battleInterfaceDiv { - position: absolute; - left: 15px; - top: 8px; -} - -canvas { - display: block; - margin-top: 0; - margin-bottom: 10; - width: 310px; - height: 150px; - margin-left: auto; - margin-right: auto; - background-color: #808080; -} - -#hypnoDroneEventDiv { - - float: left; - background: black; - width: 100%; - -} - -#prestigeDiv { - background-color: lightgrey; -} - -#consoleDiv { - - float: left; - background: black; - width: 100%; -} -#topDIv { - float: left; - width: 100%; -} -#leftColumn { - float: left ; - width: 35%; -} -#middleColumn { - float: left; - width: 27%; - margin-left: 0.5%; -} -#rightColumn { - float: left; - width: 36%; - margin-left: 0.5%; -} - -#investmentDiv1 { - float: left; - width: 64px; - line-height: 20%; -} - -#investmentDiv2 { - float: left; - width: 65%; - text-align: top; - margin-top: 0; - margin-left: 5px; -} - -#stratDiv1 { 
- float: left; - width: 95px; - line-height: 20%; -} - -#stratDiv2 { - float: left; - width: 60%; - text-align: top; - margin-top: 0; - margin-left: 5px; -} - -#vertPad { - float: left; - width: 35%; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; -} - -#vertStrat { - float: right; - text-align: center; - width: 65%; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - -} - -#horizStrat { - float: left; - - display: flex; - align-items: center; - - justify-content: center; - width: 25%; - height: 56px; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; -} - -#investmentEngineUpgrade { - margin: 5px; - margin-bottom: 10px; -} - -#tournamentManagement { - margin: 5px; - margin-bottom: 10px; -} - -#feedButtonDiv { - margin-top: 5; -} - - - - -.engine { - margin: 5; - border: 1px solid grey; - padding: 5; -} - -.engine2 { - margin: 5; - border: 1px solid grey; - padding: 5; -} - -.swarmEngine { - margin-bottom: 5; - border: 1px solid grey; - padding: 5; -} - -.qEngine { - margin-bottom: 10; - border: 1px solid grey; - padding: 5; - height: 5em; - -} - -.qChip { - width: 22px; - height: 22px; - margin-left: 2; - margin-right: 2; - margin-top: 2; - margin-bottom: 7; - background-color: black; - float: left; - -} - -.engineText { - margin-bottom: 0; - margin-top: 0; - font-size: 80%; - line-height: 150%; -} - -.engineText1 { - margin-left: 5; - margin-bottom: 0; - margin-top: 0; -} - -.engineText2 { - margin-bottom: 0; - margin-top: 0; - line-height: 150%; -} - -.engineText3 { - margin-left: 5; - margin-top: 10; - line-height: 150%; -} - -.engineText4 { - margin-left: 5; - margin-top: 10; - margin-bottom: 0; - -} - -.engineText5 { - margin-left: 0; - margin-top: 0; - margin-bottom: 3; -} - -.engineText6 { - margin-left: 0; - margin-top: 0; - margin-bottom: 0; - vertical-align: bottom; -} - -.engineText7 { - margin-left: 0; - margin-top: 0; - margin-bottom: 0; - vertical-align: bottom; -} - -.engineText8 { - margin-left: 0; - margin-top: 3px; - margin-bottom: 0; -} - -.engineText9 { - display: inline-block; - margin-top: 1px; - -} - -.engineText10 { - display: inline-block; - line-height: 18px; - vertical-align: bottom; -} - -hr { - display: block; - margin-top: .05em; - margin-bottom: 0.2em; - margin-left: auto; - margin-right: auto; - border-style: inset; - border-width: 1px; - -} - -hr.short { - - width: 225px; - margin-left: 0; -} - - -table.table1 { - table-layout: fixed; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - margin-bottom: 0; - margin-top: 0; - width: 100%; - border: none; -} - -table.table1 tr:nth-child(even) { - background-color: #dddddd; -} - -table.table2 { - border-collapse: collapse; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - margin-bottom: 0; - margin-top: 0; - margin-right: 0px; - margin-left: 0px; - width: 70%; - -} - -table.table3 { - border-collapse: collapse; - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - margin-bottom: 0; - margin-top: 0; - margin-right: 0px; - margin-left: 0px; - width: 100%; - -} - -table.table2 td:nth-child(1) { - width: 45px; - text-align: right; - padding-right: 4px; - border-left: none; - border-top: none; - border-bottom:none; - font-weight: bold; -} - -table.table2 td:nth-child(n>1) { - table-layout: fixed; - -} - -table.table2 td{ - border: 1px solid black; - text-align: center; -} - - -p.clean { - font-family: "Helvetica Neue", Helvetica, Arial, 
sans-serif; - font-size: 11; - margin-bottom: 0; - margin-top: 1px; - -} - -p.clean2 { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - margin-bottom: 0; - margin-top: 0px; - -} - -p.clean3 { - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 11; - margin-bottom: 5px; - margin-top: 2px; - -} - - -p.console { - font-family: "Lucida Sans Typewriter", "Lucida Console", Monaco, "Bitstream Vera Sans Mono", monospace; - font-size: 12; - color: white; - margin-top: 0; -} - -p.consoleOld { - font-family: "Lucida Sans Typewriter", "Lucida Console", Monaco, "Bitstream Vera Sans Mono", monospace; - font-size: 12; - color: grey; - margin-bottom: 0; -} - -p.hypnoDrone { - - font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; - font-size: 150; - color: white; - line-height: 115px; - margin-bottom: 0px; - margin-top: 0px; - -} - -.pulsate { - -webkit-animation: pulsate .5s ease-out; - -webkit-animation-iteration-count: infinite; - opacity: 0.0; - } - -@-webkit-keyframes pulsate { - 0% { - opacity: 0.0; - } - 50% { - opacity: 1.0; - } - 100% { - opacity: 0.0; - } -} - - -.projectButton { - display: block; - height: 4em; - width: 100%; - background: #c8c8c8; - border: 1px solid rgba(0, 0, 0, 1); - outline: none; - margin-bottom: 6px; -} - -.projectButton:hover { - display: block; - height: 4em; - width: 100%; - background: #c8c8c8; - border: 1px solid rgba(0, 0, 0, 0.25); -} - -.projectButton:active { - display: block; - height: 60px; - width: 100%; - background: #d1d1d1; - border: 1px solid rgba(0, 0, 0, 1); - -} - -.projectButton:disabled { - border: none; -} - -.button { - border: 1px solid #1a1a1a; - background: #666666; - - background: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#cccccc)); - background: -webkit-linear-gradient(top, #ffffff, #888888); - background: -moz-linear-gradient(top, #ffffff, #888888); - background: -ms-linear-gradient(top, #ffffff, #888888); - background: -o-linear-gradient(top, #ffffff, #888888); - background-image: -ms-linear-gradient(top, #ffffff 0%, #888888 100%); - - padding: 12.5px 25px; - -webkit-border-radius: 3px; - -moz-border-radius: 3px; - border-radius: 3px; - -webkit-box-shadow: rgba(255,255,255,0.4) 0 1px 0, inset rgba(255,255,255,0.4) 0 1px 0; - -moz-box-shadow: rgba(255,255,255,0.4) 0 1px 0, inset rgba(255,255,255,0.4) 0 1px 0; - box-shadow: rgba(255,255,255,0.4) 0 1px 0, inset rgba(255,255,255,0.4) 0 1px 0; - text-shadow: #cccccc 0 1px 0; - color: #000000; - font-size: 16px; - font-family: helvetica, serif; - text-decoration: none; - vertical-align: middle; - outline: none; - } - -.button:hover { - border: 1px solid #898989; - text-shadow: #d4d4d4 0 1px 0; - background: #666666; - background: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#666666)); - background: -webkit-linear-gradient(top, #ffffff, #666666); - background: -moz-linear-gradient(top, #ffffff, #666666); - background: -ms-linear-gradient(top, #ffffff, #666666); - background: -o-linear-gradient(top, #ffffff, #666666); - background-image: -ms-linear-gradient(top, #ffffff 0%, #666666 100%); - color: #000000; - } - -.button:active { - text-shadow: #bfbfbf 0 1px 0; - border: 1px solid #2e2e2e; - - background: #d9d9d9; - background: -webkit-gradient(linear, left top, left bottom, from(#595959), to(#616161)); - background: -webkit-linear-gradient(top, #595959, #d9d9d9); - background: -moz-linear-gradient(top, #595959, #d9d9d9); - background: -ms-linear-gradient(top, #595959, #d9d9d9); - 
background: -o-linear-gradient(top, #595959, #d9d9d9); - background-image: -ms-linear-gradient(top, #595959 0%, #d9d9d9 100%); - - color: #000000; - } - -.button:disabled{ - opacity: 0.6; - border: 1px solid #ffffff; -} - -.button2 { - border: 1px solid #1a1a1a; - background: #666666; - - background: -webkit-gradient(linear, left top, left bottom, from(#ffffff), to(#cccccc)); - background: -webkit-linear-gradient(top, #ffffff, #888888); - background: -moz-linear-gradient(top, #ffffff, #888888); - background: -ms-linear-gradient(top, #ffffff, #888888); - background: -o-linear-gradient(top, #ffffff, #888888); - background-image: -ms-linear-gradient(top, #ffffff 0%, #888888 100%); - - padding: 2px 4px; - -webkit-border-radius: 2px; - -moz-border-radius: 2px; - border-radius: 2px; - -webkit-box-shadow: rgba(255,255,255,0.4) 0 0px 0, inset rgba(255,255,255,0.4) 0 1px 0; - -moz-box-shadow: rgba(255,255,255,0.4) 0 0px 0, inset rgba(255,255,255,0.4) 0 1px 0; - box-shadow: rgba(255,255,255,0.4) 0 0px 0, inset rgba(255,255,255,0.4) 0 1px 0; - text-shadow: #cccccc 0 1px 0; - color: #000000; - font-size: 11px; - font-family: helvetica, serif; - text-decoration: none; - vertical-align: middle; - outline: none; - } - -.button2:hover { - border: 1px solid #898989; - text-shadow: #d4d4d4 0 1px 0; - background: #616161; - - background: -webkit-gradient(linear, left top, left bottom, from(#f7f7f7), to(#888888)); - background: -webkit-linear-gradient(top, #f7f7f7, #888888); - background: -moz-linear-gradient(top, #f7f7f7, #888888); - background: -ms-linear-gradient(top, #f7f7f7, #888888); - background: -o-linear-gradient(top, #f7f7f7, #888888); - background-image: -ms-linear-gradient(top, #f7f7f7 0%, #888888 100%); - - color: #000000; - } - -.button2:active { - text-shadow: #bfbfbf 0 1px 0; - border: 1px solid #2e2e2e; - - background: #d9d9d9; - background: -webkit-gradient(linear, left top, left bottom, from(#595959), to(#616161)); - background: -webkit-linear-gradient(top, #595959, #d9d9d9); - background: -moz-linear-gradient(top, #595959, #d9d9d9); - background: -ms-linear-gradient(top, #595959, #d9d9d9); - background: -o-linear-gradient(top, #595959, #d9d9d9); - background-image: -ms-linear-gradient(top, #595959 0%, #d9d9d9 100%); - - color: #444444; - } - -.button2:disabled { - opacity: 0.6; - border: 1px solid #ffffff; -} - diff --git a/spaces/osiria/bert-italian-cased-ner/README.md b/spaces/osiria/bert-italian-cased-ner/README.md deleted file mode 100644 index 9ffc9905e559d125c06cd54a56b51a04f95074fd..0000000000000000000000000000000000000000 --- a/spaces/osiria/bert-italian-cased-ner/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Bert Italian Cased Ner -emoji: 🌱 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.33.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/stable_diffusion_controlnet_inpaint_img2img.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/stable_diffusion_controlnet_inpaint_img2img.py deleted file mode 100644 index 341e89398f7d4f9cca3714852978afe75a6f3b58..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/examples/community/stable_diffusion_controlnet_inpaint_img2img.py +++ /dev/null @@ -1,1119 +0,0 @@ -# Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ - -import inspect -from typing 
import Any, Callable, Dict, List, Optional, Union - -import numpy as np -import PIL.Image -import torch -import torch.nn.functional as F -from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer - -from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging -from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker -from diffusers.schedulers import KarrasDiffusionSchedulers -from diffusers.utils import ( - PIL_INTERPOLATION, - is_accelerate_available, - is_accelerate_version, - replace_example_docstring, -) -from diffusers.utils.torch_utils import randn_tensor - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -EXAMPLE_DOC_STRING = """ - Examples: - ```py - >>> import numpy as np - >>> import torch - >>> from PIL import Image - >>> from stable_diffusion_controlnet_inpaint_img2img import StableDiffusionControlNetInpaintImg2ImgPipeline - - >>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation - >>> from diffusers import ControlNetModel, UniPCMultistepScheduler - >>> from diffusers.utils import load_image - - >>> def ade_palette(): - return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], - [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], - [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], - [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], - [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], - [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], - [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], - [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], - [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], - [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], - [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], - [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], - [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], - [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], - [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], - [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], - [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], - [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], - [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], - [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], - [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], - [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], - [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], - [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], - [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], - [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], - [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], - [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], - [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], - [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], - [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], - [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], - [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], - [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], - [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], - [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], - [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], - [102, 255, 0], [92, 0, 255]] - - >>> image_processor = 
AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small") - >>> image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small") - - >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16) - - >>> pipe = StableDiffusionControlNetInpaintImg2ImgPipeline.from_pretrained( - "runwayml/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 - ) - - >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) - >>> pipe.enable_xformers_memory_efficient_attention() - >>> pipe.enable_model_cpu_offload() - - >>> def image_to_seg(image): - pixel_values = image_processor(image, return_tensors="pt").pixel_values - with torch.no_grad(): - outputs = image_segmentor(pixel_values) - seg = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0] - color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8) # height, width, 3 - palette = np.array(ade_palette()) - for label, color in enumerate(palette): - color_seg[seg == label, :] = color - color_seg = color_seg.astype(np.uint8) - seg_image = Image.fromarray(color_seg) - return seg_image - - >>> image = load_image( - "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" - ) - - >>> mask_image = load_image( - "https://github.com/CompVis/latent-diffusion/raw/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" - ) - - >>> controlnet_conditioning_image = image_to_seg(image) - - >>> image = pipe( - "Face of a yellow cat, high resolution, sitting on a park bench", - image, - mask_image, - controlnet_conditioning_image, - num_inference_steps=20, - ).images[0] - - >>> image.save("out.png") - ``` -""" - - -def prepare_image(image): - if isinstance(image, torch.Tensor): - # Batch single image - if image.ndim == 3: - image = image.unsqueeze(0) - - image = image.to(dtype=torch.float32) - else: - # preprocess image - if isinstance(image, (PIL.Image.Image, np.ndarray)): - image = [image] - - if isinstance(image, list) and isinstance(image[0], PIL.Image.Image): - image = [np.array(i.convert("RGB"))[None, :] for i in image] - image = np.concatenate(image, axis=0) - elif isinstance(image, list) and isinstance(image[0], np.ndarray): - image = np.concatenate([i[None, :] for i in image], axis=0) - - image = image.transpose(0, 3, 1, 2) - image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 - - return image - - -def prepare_mask_image(mask_image): - if isinstance(mask_image, torch.Tensor): - if mask_image.ndim == 2: - # Batch and add channel dim for single mask - mask_image = mask_image.unsqueeze(0).unsqueeze(0) - elif mask_image.ndim == 3 and mask_image.shape[0] == 1: - # Single mask, the 0'th dimension is considered to be - # the existing batch size of 1 - mask_image = mask_image.unsqueeze(0) - elif mask_image.ndim == 3 and mask_image.shape[0] != 1: - # Batch of mask, the 0'th dimension is considered to be - # the batching dimension - mask_image = mask_image.unsqueeze(1) - - # Binarize mask - mask_image[mask_image < 0.5] = 0 - mask_image[mask_image >= 0.5] = 1 - else: - # preprocess mask - if isinstance(mask_image, (PIL.Image.Image, np.ndarray)): - mask_image = [mask_image] - - if isinstance(mask_image, list) and isinstance(mask_image[0], PIL.Image.Image): - mask_image = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask_image], axis=0) - mask_image = 
mask_image.astype(np.float32) / 255.0 - elif isinstance(mask_image, list) and isinstance(mask_image[0], np.ndarray): - mask_image = np.concatenate([m[None, None, :] for m in mask_image], axis=0) - - mask_image[mask_image < 0.5] = 0 - mask_image[mask_image >= 0.5] = 1 - mask_image = torch.from_numpy(mask_image) - - return mask_image - - -def prepare_controlnet_conditioning_image( - controlnet_conditioning_image, width, height, batch_size, num_images_per_prompt, device, dtype -): - if not isinstance(controlnet_conditioning_image, torch.Tensor): - if isinstance(controlnet_conditioning_image, PIL.Image.Image): - controlnet_conditioning_image = [controlnet_conditioning_image] - - if isinstance(controlnet_conditioning_image[0], PIL.Image.Image): - controlnet_conditioning_image = [ - np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :] - for i in controlnet_conditioning_image - ] - controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0) - controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0 - controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2) - controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image) - elif isinstance(controlnet_conditioning_image[0], torch.Tensor): - controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0) - - image_batch_size = controlnet_conditioning_image.shape[0] - - if image_batch_size == 1: - repeat_by = batch_size - else: - # image batch size is the same as prompt batch size - repeat_by = num_images_per_prompt - - controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0) - - controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype) - - return controlnet_conditioning_image - - -class StableDiffusionControlNetInpaintImg2ImgPipeline(DiffusionPipeline): - """ - Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/ - """ - - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae: AutoencoderKL, - text_encoder: CLIPTextModel, - tokenizer: CLIPTokenizer, - unet: UNet2DConditionModel, - controlnet: ControlNetModel, - scheduler: KarrasDiffusionSchedulers, - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPImageProcessor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." 
- ) - - self.register_modules( - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - controlnet=controlnet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - def enable_vae_slicing(self): - r""" - Enable sliced VAE decoding. - - When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several - steps. This is useful to save some memory and allow larger batch sizes. - """ - self.vae.enable_slicing() - - def disable_vae_slicing(self): - r""" - Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to - computing decoding in one step. - """ - self.vae.disable_slicing() - - def enable_sequential_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet, - text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a - `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called. - Note that offloading happens on a submodule basis. Memory savings are higher than with - `enable_model_cpu_offload`, but performance is lower. - """ - if is_accelerate_available(): - from accelerate import cpu_offload - else: - raise ImportError("Please install accelerate via `pip install accelerate`") - - device = torch.device(f"cuda:{gpu_id}") - - for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]: - cpu_offload(cpu_offloaded_model, device) - - if self.safety_checker is not None: - cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True) - - def enable_model_cpu_offload(self, gpu_id=0): - r""" - Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared - to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward` - method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with - `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`. - """ - if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"): - from accelerate import cpu_offload_with_hook - else: - raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.") - - device = torch.device(f"cuda:{gpu_id}") - - hook = None - for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]: - _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) - - if self.safety_checker is not None: - # the safety checker can offload the vae again - _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook) - - # control net hook has be manually offloaded as it alternates with unet - cpu_offload_with_hook(self.controlnet, device) - - # We'll offload the last model manually. - self.final_offload_hook = hook - - @property - def _execution_device(self): - r""" - Returns the device on which the pipeline's models will be executed. After calling - `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module - hooks. 
- """ - if not hasattr(self.unet, "_hf_hook"): - return self.device - for module in self.unet.modules(): - if ( - hasattr(module, "_hf_hook") - and hasattr(module._hf_hook, "execution_device") - and module._hf_hook.execution_device is not None - ): - return torch.device(module._hf_hook.execution_device) - return self.device - - def _encode_prompt( - self, - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt=None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - ): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `List[str]`, *optional*): - prompt to be encoded - device: (`torch.device`): - torch device - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - """ - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - if prompt_embeds is None: - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="pt", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids - - if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( - text_input_ids, untruncated_ids - ): - removed_text = self.tokenizer.batch_decode( - untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] - ) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = text_inputs.attention_mask.to(device) - else: - attention_mask = None - - prompt_embeds = self.text_encoder( - text_input_ids.to(device), - attention_mask=attention_mask, - ) - prompt_embeds = prompt_embeds[0] - - prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - bs_embed, seq_len, _ = prompt_embeds.shape - # duplicate text embeddings for each generation per prompt, using mps friendly method - prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) - prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance and negative_prompt_embeds is None: - uncond_tokens: List[str] - if negative_prompt is None: - 
uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = prompt_embeds.shape[1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="pt", - ) - - if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: - attention_mask = uncond_input.attention_mask.to(device) - else: - attention_mask = None - - negative_prompt_embeds = self.text_encoder( - uncond_input.input_ids.to(device), - attention_mask=attention_mask, - ) - negative_prompt_embeds = negative_prompt_embeds[0] - - if do_classifier_free_guidance: - # duplicate unconditional embeddings for each generation per prompt, using mps friendly method - seq_len = negative_prompt_embeds.shape[1] - - negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device) - - negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) - negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) - - return prompt_embeds - - def run_safety_checker(self, image, device, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device) - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.to(dtype) - ) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - def decode_latents(self, latents): - latents = 1 / self.vae.config.scaling_factor * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - image = image.cpu().permute(0, 2, 3, 1).float().numpy() - return image - - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - def check_inputs( - self, - prompt, - image, - mask_image, - controlnet_conditioning_image, - height, - width, - callback_steps, - negative_prompt=None, - prompt_embeds=None, - negative_prompt_embeds=None, - strength=None, - ): - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." - ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." 
- ) - - controlnet_cond_image_is_pil = isinstance(controlnet_conditioning_image, PIL.Image.Image) - controlnet_cond_image_is_tensor = isinstance(controlnet_conditioning_image, torch.Tensor) - controlnet_cond_image_is_pil_list = isinstance(controlnet_conditioning_image, list) and isinstance( - controlnet_conditioning_image[0], PIL.Image.Image - ) - controlnet_cond_image_is_tensor_list = isinstance(controlnet_conditioning_image, list) and isinstance( - controlnet_conditioning_image[0], torch.Tensor - ) - - if ( - not controlnet_cond_image_is_pil - and not controlnet_cond_image_is_tensor - and not controlnet_cond_image_is_pil_list - and not controlnet_cond_image_is_tensor_list - ): - raise TypeError( - "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors" - ) - - if controlnet_cond_image_is_pil: - controlnet_cond_image_batch_size = 1 - elif controlnet_cond_image_is_tensor: - controlnet_cond_image_batch_size = controlnet_conditioning_image.shape[0] - elif controlnet_cond_image_is_pil_list: - controlnet_cond_image_batch_size = len(controlnet_conditioning_image) - elif controlnet_cond_image_is_tensor_list: - controlnet_cond_image_batch_size = len(controlnet_conditioning_image) - - if prompt is not None and isinstance(prompt, str): - prompt_batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - prompt_batch_size = len(prompt) - elif prompt_embeds is not None: - prompt_batch_size = prompt_embeds.shape[0] - - if controlnet_cond_image_batch_size != 1 and controlnet_cond_image_batch_size != prompt_batch_size: - raise ValueError( - f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {controlnet_cond_image_batch_size}, prompt batch size: {prompt_batch_size}" - ) - - if isinstance(image, torch.Tensor) and not isinstance(mask_image, torch.Tensor): - raise TypeError("if `image` is a tensor, `mask_image` must also be a tensor") - - if isinstance(image, PIL.Image.Image) and not isinstance(mask_image, PIL.Image.Image): - raise TypeError("if `image` is a PIL image, `mask_image` must also be a PIL image") - - if isinstance(image, torch.Tensor): - if image.ndim != 3 and image.ndim != 4: - raise ValueError("`image` must have 3 or 4 dimensions") - - if mask_image.ndim != 2 and mask_image.ndim != 3 and mask_image.ndim != 4: - raise ValueError("`mask_image` must have 2, 3, or 4 dimensions") - - if image.ndim == 3: - image_batch_size = 1 - image_channels, image_height, image_width = image.shape - elif image.ndim == 4: - image_batch_size, image_channels, image_height, image_width = image.shape - - if mask_image.ndim == 2: - mask_image_batch_size = 1 - mask_image_channels = 1 - mask_image_height, mask_image_width = mask_image.shape - elif mask_image.ndim == 3: - mask_image_channels = 1 - mask_image_batch_size, mask_image_height, mask_image_width = mask_image.shape - elif mask_image.ndim == 4: - mask_image_batch_size, mask_image_channels, mask_image_height, mask_image_width = mask_image.shape - - if image_channels != 3: - raise ValueError("`image` must have 3 channels") - - if mask_image_channels != 1: - raise ValueError("`mask_image` must have 1 channel") - - if image_batch_size != mask_image_batch_size: - raise ValueError("`image` and `mask_image` mush have the same batch sizes") - - if image_height != mask_image_height or image_width != mask_image_width: - raise ValueError("`image` and `mask_image` must have the same height and width dimensions") - - if image.min() < -1 or image.max() > 1: - raise 
ValueError("`image` should be in range [-1, 1]") - - if mask_image.min() < 0 or mask_image.max() > 1: - raise ValueError("`mask_image` should be in range [0, 1]") - else: - mask_image_channels = 1 - image_channels = 3 - - single_image_latent_channels = self.vae.config.latent_channels - - total_latent_channels = single_image_latent_channels * 2 + mask_image_channels - - if total_latent_channels != self.unet.config.in_channels: - raise ValueError( - f"The config of `pipeline.unet` expects {self.unet.config.in_channels} but received" - f" non inpainting latent channels: {single_image_latent_channels}," - f" mask channels: {mask_image_channels}, and masked image channels: {single_image_latent_channels}." - f" Please verify the config of `pipeline.unet` and the `mask_image` and `image` inputs." - ) - - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - def get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start:] - - return timesteps, num_inference_steps - t_start - - def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None): - if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)): - raise ValueError( - f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}" - ) - - image = image.to(device=device, dtype=dtype) - - batch_size = batch_size * num_images_per_prompt - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." - ) - - if isinstance(generator, list): - init_latents = [ - self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size) - ] - init_latents = torch.cat(init_latents, dim=0) - else: - init_latents = self.vae.encode(image).latent_dist.sample(generator) - - init_latents = self.vae.config.scaling_factor * init_latents - - if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts." - ) - else: - init_latents = torch.cat([init_latents], dim=0) - - shape = init_latents.shape - noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype) - - # get latents - init_latents = self.scheduler.add_noise(init_latents, noise, timestep) - latents = init_latents - - return latents - - def prepare_mask_latents(self, mask_image, batch_size, height, width, dtype, device, do_classifier_free_guidance): - # resize the mask to latents shape as we concatenate the mask to the latents - # we do that before converting to dtype to avoid breaking in case we're using cpu_offload - # and half precision - mask_image = F.interpolate(mask_image, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)) - mask_image = mask_image.to(device=device, dtype=dtype) - - # duplicate mask for each generation per prompt, using mps friendly method - if mask_image.shape[0] < batch_size: - if not batch_size % mask_image.shape[0] == 0: - raise ValueError( - "The passed mask and the required batch size don't match. 
Masks are supposed to be duplicated to" - f" a total batch size of {batch_size}, but {mask_image.shape[0]} masks were passed. Make sure the number" - " of masks that you pass is divisible by the total requested batch size." - ) - mask_image = mask_image.repeat(batch_size // mask_image.shape[0], 1, 1, 1) - - mask_image = torch.cat([mask_image] * 2) if do_classifier_free_guidance else mask_image - - mask_image_latents = mask_image - - return mask_image_latents - - def prepare_masked_image_latents( - self, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance - ): - masked_image = masked_image.to(device=device, dtype=dtype) - - # encode the mask image into latents space so we can concatenate it to the latents - if isinstance(generator, list): - masked_image_latents = [ - self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i]) - for i in range(batch_size) - ] - masked_image_latents = torch.cat(masked_image_latents, dim=0) - else: - masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator) - masked_image_latents = self.vae.config.scaling_factor * masked_image_latents - - # duplicate masked_image_latents for each generation per prompt, using mps friendly method - if masked_image_latents.shape[0] < batch_size: - if not batch_size % masked_image_latents.shape[0] == 0: - raise ValueError( - "The passed images and the required batch size don't match. Images are supposed to be duplicated" - f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." - " Make sure the number of images that you pass is divisible by the total requested batch size." - ) - masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1) - - masked_image_latents = ( - torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents - ) - - # aligning device to prevent device errors when concating it with the latent model input - masked_image_latents = masked_image_latents.to(device=device, dtype=dtype) - return masked_image_latents - - def _default_height_width(self, height, width, image): - if isinstance(image, list): - image = image[0] - - if height is None: - if isinstance(image, PIL.Image.Image): - height = image.height - elif isinstance(image, torch.Tensor): - height = image.shape[3] - - height = (height // 8) * 8 # round down to nearest multiple of 8 - - if width is None: - if isinstance(image, PIL.Image.Image): - width = image.width - elif isinstance(image, torch.Tensor): - width = image.shape[2] - - width = (width // 8) * 8 # round down to nearest multiple of 8 - - return height, width - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Union[str, List[str]] = None, - image: Union[torch.Tensor, PIL.Image.Image] = None, - mask_image: Union[torch.Tensor, PIL.Image.Image] = None, - controlnet_conditioning_image: Union[ - torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image] - ] = None, - strength: float = 0.8, - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, 
- negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, - callback_steps: int = 1, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - controlnet_conditioning_scale: float = 1.0, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - image (`torch.Tensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will - be masked out with `mask_image` and repainted according to `prompt`. - mask_image (`torch.Tensor` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted - to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) - instead of 3, so the expected shape would be `(B, H, W, 1)`. - controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`): - The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If - the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. PIL.Image.Image` can - also be accepted as an image. The control image is automatically resized to fit the output image. - strength (`float`, *optional*): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` - will be used as a starting point, adding more noise to it the larger the `strength`. The number of - denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will - be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. - width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. - Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. 
- eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - cross_attention_kwargs (`dict`, *optional*): - A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under - `self.processor` in - [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). - controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0): - The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added - to the residual in the original unet. - - Examples: - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 0. Default height and width to unet - height, width = self._default_height_width(height, width, controlnet_conditioning_image) - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, - image, - mask_image, - controlnet_conditioning_image, - height, - width, - callback_steps, - negative_prompt, - prompt_embeds, - negative_prompt_embeds, - strength, - ) - - # 2. 
Define call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 3. Encode input prompt - prompt_embeds = self._encode_prompt( - prompt, - device, - num_images_per_prompt, - do_classifier_free_guidance, - negative_prompt, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_prompt_embeds, - ) - - # 4. Prepare mask, image, and controlnet_conditioning_image - image = prepare_image(image) - - mask_image = prepare_mask_image(mask_image) - - controlnet_conditioning_image = prepare_controlnet_conditioning_image( - controlnet_conditioning_image, - width, - height, - batch_size * num_images_per_prompt, - num_images_per_prompt, - device, - self.controlnet.dtype, - ) - - masked_image = image * (mask_image < 0.5) - - # 5. Prepare timesteps - self.scheduler.set_timesteps(num_inference_steps, device=device) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device) - latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt) - - # 6. Prepare latent variables - latents = self.prepare_latents( - image, - latent_timestep, - batch_size, - num_images_per_prompt, - prompt_embeds.dtype, - device, - generator, - ) - - mask_image_latents = self.prepare_mask_latents( - mask_image, - batch_size * num_images_per_prompt, - height, - width, - prompt_embeds.dtype, - device, - do_classifier_free_guidance, - ) - - masked_image_latents = self.prepare_masked_image_latents( - masked_image, - batch_size * num_images_per_prompt, - height, - width, - prompt_embeds.dtype, - device, - generator, - do_classifier_free_guidance, - ) - - if do_classifier_free_guidance: - controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2) - - # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 8. 
Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - non_inpainting_latent_model_input = ( - torch.cat([latents] * 2) if do_classifier_free_guidance else latents - ) - - non_inpainting_latent_model_input = self.scheduler.scale_model_input( - non_inpainting_latent_model_input, t - ) - - inpainting_latent_model_input = torch.cat( - [non_inpainting_latent_model_input, mask_image_latents, masked_image_latents], dim=1 - ) - - down_block_res_samples, mid_block_res_sample = self.controlnet( - non_inpainting_latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - controlnet_cond=controlnet_conditioning_image, - return_dict=False, - ) - - down_block_res_samples = [ - down_block_res_sample * controlnet_conditioning_scale - for down_block_res_sample in down_block_res_samples - ] - mid_block_res_sample *= controlnet_conditioning_scale - - # predict the noise residual - noise_pred = self.unet( - inpainting_latent_model_input, - t, - encoder_hidden_states=prompt_embeds, - cross_attention_kwargs=cross_attention_kwargs, - down_block_additional_residuals=down_block_res_samples, - mid_block_additional_residual=mid_block_res_sample, - ).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # If we do sequential model offloading, let's offload unet and controlnet - # manually for max memory savings - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.unet.to("cpu") - self.controlnet.to("cpu") - torch.cuda.empty_cache() - - if output_type == "latent": - image = latents - has_nsfw_concept = None - elif output_type == "pil": - # 8. Post-processing - image = self.decode_latents(latents) - - # 9. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - - # 10. Convert to PIL - image = self.numpy_to_pil(image) - else: - # 8. Post-processing - image = self.decode_latents(latents) - - # 9. 
Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) - - # Offload last model to CPU - if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: - self.final_offload_hook.offload() - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/pjmartorell/AnimeGANv3/app.py b/spaces/pjmartorell/AnimeGANv3/app.py deleted file mode 100644 index 677f429041bf7da6a7161135726355e315db4712..0000000000000000000000000000000000000000 --- a/spaces/pjmartorell/AnimeGANv3/app.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import cv2 -import gradio as gr -import AnimeGANv3_src - - -os.makedirs('output', exist_ok=True) - - -def inference(img_path, Style, if_face=None): - print(img_path, Style, if_face) - try: - img = cv2.imread(img_path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - if Style == "AnimeGANv3_Arcane": - f = "A" - elif Style == "AnimeGANv3_Trump v1.0": - f = "T" - elif Style == "AnimeGANv3_Shinkai": - f = "S" - elif Style == "AnimeGANv3_PortraitSketch": - f = "P" - elif Style == "AnimeGANv3_Hayao": - f = "H" - elif Style == "AnimeGANv3_Disney v1.0": - f = "D" - elif Style == "AnimeGANv3_JP_face v1.0": - f = "J" - else: - f = "U" - - try: - det_face = True if if_face=="Yes" else False - output = AnimeGANv3_src.Convert(img, f, det_face) - save_path = f"output/out.{img_path.rsplit('.')[-1]}" - cv2.imwrite(save_path, output[:, :, ::-1]) - return output, save_path - except RuntimeError as error: - print('Error', error) - except Exception as error: - print('global exception', error) - return None, None - - -title = "AnimeGANv3: To produce your own animation." -description = r"""Official online demo for AnimeGANv3. If you like what I'm doing you can tip me on **patreon**.
-It can be used to turn your photos or videos into anime.
-To use it, simply upload your image. It can convert landscape photos to Hayao Miyazaki or Makoto Shinkai style anime, and also offers six style conversions for human faces.
        -If AnimeGANv3 is helpful, please help to ⭐ the Github Repo and recommend it to your friends. 😊 - -""" -article = r""" - -[![GitHub Stars](https://img.shields.io/github/stars/TachibanaYoshino/AnimeGANv3?style=social)](https://github.com/TachibanaYoshino/AnimeGANv3) - -### 🔥 Demo -I. Video to anime (Hayao Style) -
-II. Video to anime (USA cartoon + Disney style) - - ---------- - -## License -This repo is made freely available to academic and non-academic entities for non-commercial purposes such as academic research, teaching, and scientific publications. Permission is granted to use AnimeGANv3 provided that you agree to my license terms. For commercial use, please contact us via email to obtain an authorization letter. - -## Acknowledgement -* Huggingface UI is referenced from @akhaliq/GFPGAN. -* The dataset of AnimeGANv3_JP_face v1.0 is from DCTnet and then manually optimized. - -## Author -Xin Chen -If you have any questions, please open an issue on the GitHub Repo. - - -
        -""" -gr.Interface( - inference, [ - gr.inputs.Image(type="filepath", label="Input"), - gr.Dropdown([ - 'AnimeGANv3_Hayao', - 'AnimeGANv3_Shinkai', - 'AnimeGANv3_Arcane', - 'AnimeGANv3_USA', - 'AnimeGANv3_Trump v1.0', - 'AnimeGANv3_Disney v1.0', - 'AnimeGANv3_PortraitSketch', - 'AnimeGANv3_JP_face v1.0', - ], - type="value", - value='AnimeGANv3_Hayao', - label='AnimeGANv3 Style'), - gr.inputs.Radio(['Yes', 'No'], type="value", default='No', label='Extract face'), - ], [ - gr.outputs.Image(type="numpy", label="Output (The whole image)"), - gr.outputs.File(label="Download the output image") - ], - title=title, - description=description, - article=article, - allow_flagging="never", - examples=[['samples/7_out.jpg', 'AnimeGANv3_Arcane', "Yes"], ['samples/15566.jpg', 'AnimeGANv3_USA', "Yes"],['samples/23034.jpg', 'AnimeGANv3_Trump v1.0', "Yes"], ['samples/jp_13.jpg', 'AnimeGANv3_Hayao', "No"], - ['samples/jp_20.jpg', 'AnimeGANv3_Shinkai', "No"], ['samples/Hamabe Minami.jpg', 'AnimeGANv3_Disney v1.0', "Yes"], ['samples/120.jpg', 'AnimeGANv3_JP_face v1.0', "Yes"], ['samples/52014.jpg', 'AnimeGANv3_PortraitSketch', "Yes"]]).launch(enable_queue=True) diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/diagnose.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/diagnose.py deleted file mode 100644 index ad36183898eddb11e33ccb7623c0291ccc0f091d..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/diagnose.py +++ /dev/null @@ -1,37 +0,0 @@ -import os -import platform - -from pip._vendor.rich import inspect -from pip._vendor.rich.console import Console, get_windows_console_features -from pip._vendor.rich.panel import Panel -from pip._vendor.rich.pretty import Pretty - - -def report() -> None: # pragma: no cover - """Print a report to the terminal with debugging information""" - console = Console() - inspect(console) - features = get_windows_console_features() - inspect(features) - - env_names = ( - "TERM", - "COLORTERM", - "CLICOLOR", - "NO_COLOR", - "TERM_PROGRAM", - "COLUMNS", - "LINES", - "JUPYTER_COLUMNS", - "JUPYTER_LINES", - "JPY_PARENT_PID", - "VSCODE_VERBOSE_LOGGING", - ) - env = {name: os.getenv(name) for name in env_names} - console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables")) - - console.print(f'platform="{platform.system()}"') - - -if __name__ == "__main__": # pragma: no cover - report() diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/register.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/register.py deleted file mode 100644 index c19aabb91ff4595bc7e20e9d6f80a16a9be5d42b..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/register.py +++ /dev/null @@ -1,320 +0,0 @@ -"""distutils.command.register - -Implements the Distutils 'register' command (register with the repository). 
-""" - -# created 2002/10/21, Richard Jones - -import getpass -import io -import logging -import urllib.parse -import urllib.request -from warnings import warn - -from ..core import PyPIRCCommand -from distutils._log import log - - -class register(PyPIRCCommand): - description = "register the distribution with the Python package index" - user_options = PyPIRCCommand.user_options + [ - ('list-classifiers', None, 'list the valid Trove classifiers'), - ( - 'strict', - None, - 'Will stop the registering if the meta-data are not fully compliant', - ), - ] - boolean_options = PyPIRCCommand.boolean_options + [ - 'verify', - 'list-classifiers', - 'strict', - ] - - sub_commands = [('check', lambda self: True)] - - def initialize_options(self): - PyPIRCCommand.initialize_options(self) - self.list_classifiers = 0 - self.strict = 0 - - def finalize_options(self): - PyPIRCCommand.finalize_options(self) - # setting options for the `check` subcommand - check_options = { - 'strict': ('register', self.strict), - 'restructuredtext': ('register', 1), - } - self.distribution.command_options['check'] = check_options - - def run(self): - self.finalize_options() - self._set_config() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - if self.dry_run: - self.verify_metadata() - elif self.list_classifiers: - self.classifiers() - else: - self.send_metadata() - - def check_metadata(self): - """Deprecated API.""" - warn( - "distutils.command.register.check_metadata is deprecated; " - "use the check command instead", - DeprecationWarning, - ) - check = self.distribution.get_command_obj('check') - check.ensure_finalized() - check.strict = self.strict - check.restructuredtext = 1 - check.run() - - def _set_config(self): - '''Reads the configuration file and set attributes.''' - config = self._read_pypirc() - if config != {}: - self.username = config['username'] - self.password = config['password'] - self.repository = config['repository'] - self.realm = config['realm'] - self.has_config = True - else: - if self.repository not in ('pypi', self.DEFAULT_REPOSITORY): - raise ValueError('%s not found in .pypirc' % self.repository) - if self.repository == 'pypi': - self.repository = self.DEFAULT_REPOSITORY - self.has_config = False - - def classifiers(self): - '''Fetch the list of classifiers from the server.''' - url = self.repository + '?:action=list_classifiers' - response = urllib.request.urlopen(url) - log.info(self._read_pypi_response(response)) - - def verify_metadata(self): - '''Send the metadata to the package index server to be checked.''' - # send the info to the server and report the result - (code, result) = self.post_to_server(self.build_post_data('verify')) - log.info('Server response (%s): %s', code, result) - - def send_metadata(self): # noqa: C901 - '''Send the metadata to the package index server. - - Well, do the following: - 1. figure who the user is, and then - 2. send the data as a Basic auth'ed POST. - - First we try to read the username/password from $HOME/.pypirc, - which is a ConfigParser-formatted file with a section - [distutils] containing username and password entries (both - in clear text). Eg: - - [distutils] - index-servers = - pypi - - [pypi] - username: fred - password: sekrit - - Otherwise, to figure who the user is, we offer the user three - choices: - - 1. use existing login, - 2. register as a new user, or - 3. set the password to a random string and email the user. 
- - ''' - # see if we can short-cut and get the username/password from the - # config - if self.has_config: - choice = '1' - username = self.username - password = self.password - else: - choice = 'x' - username = password = '' - - # get the user's login info - choices = '1 2 3 4'.split() - while choice not in choices: - self.announce( - '''\ -We need to know who you are, so please choose either: - 1. use your existing login, - 2. register as a new user, - 3. have the server generate a new password for you (and email it to you), or - 4. quit -Your selection [default 1]: ''', - logging.INFO, - ) - choice = input() - if not choice: - choice = '1' - elif choice not in choices: - print('Please choose one of the four options!') - - if choice == '1': - # get the username and password - while not username: - username = input('Username: ') - while not password: - password = getpass.getpass('Password: ') - - # set up the authentication - auth = urllib.request.HTTPPasswordMgr() - host = urllib.parse.urlparse(self.repository)[1] - auth.add_password(self.realm, host, username, password) - # send the info to the server and report the result - code, result = self.post_to_server(self.build_post_data('submit'), auth) - self.announce('Server response ({}): {}'.format(code, result), logging.INFO) - - # possibly save the login - if code == 200: - if self.has_config: - # sharing the password in the distribution instance - # so the upload command can reuse it - self.distribution.password = password - else: - self.announce( - ( - 'I can store your PyPI login so future ' - 'submissions will be faster.' - ), - logging.INFO, - ) - self.announce( - '(the login will be stored in %s)' % self._get_rc_file(), - logging.INFO, - ) - choice = 'X' - while choice.lower() not in 'yn': - choice = input('Save your login (y/N)?') - if not choice: - choice = 'n' - if choice.lower() == 'y': - self._store_pypirc(username, password) - - elif choice == '2': - data = {':action': 'user'} - data['name'] = data['password'] = data['email'] = '' - data['confirm'] = None - while not data['name']: - data['name'] = input('Username: ') - while data['password'] != data['confirm']: - while not data['password']: - data['password'] = getpass.getpass('Password: ') - while not data['confirm']: - data['confirm'] = getpass.getpass(' Confirm: ') - if data['password'] != data['confirm']: - data['password'] = '' - data['confirm'] = None - print("Password and confirm don't match!") - while not data['email']: - data['email'] = input(' EMail: ') - code, result = self.post_to_server(data) - if code != 200: - log.info('Server response (%s): %s', code, result) - else: - log.info('You will receive an email shortly.') - log.info('Follow the instructions in it to ' 'complete registration.') - elif choice == '3': - data = {':action': 'password_reset'} - data['email'] = '' - while not data['email']: - data['email'] = input('Your email address: ') - code, result = self.post_to_server(data) - log.info('Server response (%s): %s', code, result) - - def build_post_data(self, action): - # figure the data to send - the metadata plus some additional - # information used by the package server - meta = self.distribution.metadata - data = { - ':action': action, - 'metadata_version': '1.0', - 'name': meta.get_name(), - 'version': meta.get_version(), - 'summary': meta.get_description(), - 'home_page': meta.get_url(), - 'author': meta.get_contact(), - 'author_email': meta.get_contact_email(), - 'license': meta.get_licence(), - 'description': meta.get_long_description(), - 
'keywords': meta.get_keywords(), - 'platform': meta.get_platforms(), - 'classifiers': meta.get_classifiers(), - 'download_url': meta.get_download_url(), - # PEP 314 - 'provides': meta.get_provides(), - 'requires': meta.get_requires(), - 'obsoletes': meta.get_obsoletes(), - } - if data['provides'] or data['requires'] or data['obsoletes']: - data['metadata_version'] = '1.1' - return data - - def post_to_server(self, data, auth=None): # noqa: C901 - '''Post a query to the server, and return a string response.''' - if 'name' in data: - self.announce( - 'Registering {} to {}'.format(data['name'], self.repository), - logging.INFO, - ) - # Build up the MIME payload for the urllib2 POST data - boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = '\n--' + boundary - end_boundary = sep_boundary + '--' - body = io.StringIO() - for key, value in data.items(): - # handle multiple entries for the same name - if type(value) not in (type([]), type(())): - value = [value] - for value in value: - value = str(value) - body.write(sep_boundary) - body.write('\nContent-Disposition: form-data; name="%s"' % key) - body.write("\n\n") - body.write(value) - if value and value[-1] == '\r': - body.write('\n') # write an extra newline (lurve Macs) - body.write(end_boundary) - body.write("\n") - body = body.getvalue().encode("utf-8") - - # build the Request - headers = { - 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8' - % boundary, - 'Content-length': str(len(body)), - } - req = urllib.request.Request(self.repository, body, headers) - - # handle HTTP and include the Basic Auth handler - opener = urllib.request.build_opener( - urllib.request.HTTPBasicAuthHandler(password_mgr=auth) - ) - data = '' - try: - result = opener.open(req) - except urllib.error.HTTPError as e: - if self.show_response: - data = e.fp.read() - result = e.code, e.msg - except urllib.error.URLError as e: - result = 500, str(e) - else: - if self.show_response: - data = self._read_pypi_response(result) - result = 200, 'OK' - if self.show_response: - msg = '\n'.join(('-' * 75, data, '-' * 75)) - self.announce(msg, logging.INFO) - return result diff --git a/spaces/posit/shiny-for-python-template/app.py b/spaces/posit/shiny-for-python-template/app.py deleted file mode 100644 index 2c4dc6eea9e9317c85af90240e5d697ad8efaf17..0000000000000000000000000000000000000000 --- a/spaces/posit/shiny-for-python-template/app.py +++ /dev/null @@ -1,151 +0,0 @@ -from pathlib import Path -from typing import List, Dict, Tuple -import matplotlib.colors as mpl_colors - -import pandas as pd -import seaborn as sns -import shinyswatch - -from shiny import App, Inputs, Outputs, Session, reactive, render, req, ui - -sns.set_theme() - -www_dir = Path(__file__).parent.resolve() / "www" - -df = pd.read_csv(Path(__file__).parent / "penguins.csv", na_values="NA") -numeric_cols: List[str] = df.select_dtypes(include=["float64"]).columns.tolist() -species: List[str] = df["Species"].unique().tolist() -species.sort() - -app_ui = ui.page_fillable( - shinyswatch.theme.minty(), - ui.layout_sidebar( - ui.sidebar( - # Artwork by @allison_horst - ui.input_selectize( - "xvar", - "X variable", - numeric_cols, - selected="Bill Length (mm)", - ), - ui.input_selectize( - "yvar", - "Y variable", - numeric_cols, - selected="Bill Depth (mm)", - ), - ui.input_checkbox_group( - "species", "Filter by species", species, selected=species - ), - ui.hr(), - ui.input_switch("by_species", "Show species", value=True), - ui.input_switch("show_margins", "Show marginal 
plots", value=True), - ), - ui.output_ui("value_boxes"), - ui.output_plot("scatter", fill=True), - ui.help_text( - "Artwork by ", - ui.a("@allison_horst", href="https://twitter.com/allison_horst"), - class_="text-end", - ), - ), -) - - -def server(input: Inputs, output: Outputs, session: Session): - @reactive.Calc - def filtered_df() -> pd.DataFrame: - """Returns a Pandas data frame that includes only the desired rows""" - - # This calculation "req"uires that at least one species is selected - req(len(input.species()) > 0) - - # Filter the rows so we only include the desired species - return df[df["Species"].isin(input.species())] - - @output - @render.plot - def scatter(): - """Generates a plot for Shiny to display to the user""" - - # The plotting function to use depends on whether margins are desired - plotfunc = sns.jointplot if input.show_margins() else sns.scatterplot - - plotfunc( - data=filtered_df(), - x=input.xvar(), - y=input.yvar(), - palette=palette, - hue="Species" if input.by_species() else None, - hue_order=species, - legend=False, - ) - - @output - @render.ui - def value_boxes(): - df = filtered_df() - - def penguin_value_box(title: str, count: int, bgcol: str, showcase_img: str): - return ui.value_box( - title, - count, - {"class_": "pt-1 pb-0"}, - showcase=ui.fill.as_fill_item( - ui.tags.img( - {"style": "object-fit:contain;"}, - src=showcase_img, - ) - ), - theme_color=None, - style=f"background-color: {bgcol};", - ) - - if not input.by_species(): - return penguin_value_box( - "Penguins", - len(df.index), - bg_palette["default"], - # Artwork by @allison_horst - showcase_img="penguins.png", - ) - - value_boxes = [ - penguin_value_box( - name, - len(df[df["Species"] == name]), - bg_palette[name], - # Artwork by @allison_horst - showcase_img=f"{name}.png", - ) - for name in species - # Only include boxes for _selected_ species - if name in input.species() - ] - - return ui.layout_column_wrap(*value_boxes, width = 1 / len(value_boxes)) - - -# "darkorange", "purple", "cyan4" -colors = [[255, 140, 0], [160, 32, 240], [0, 139, 139]] -colors = [(r / 255.0, g / 255.0, b / 255.0) for r, g, b in colors] - -palette: Dict[str, Tuple[float, float, float]] = { - "Adelie": colors[0], - "Chinstrap": colors[1], - "Gentoo": colors[2], - "default": sns.color_palette()[0], # type: ignore -} - -bg_palette = {} -# Use `sns.set_style("whitegrid")` to help find approx alpha value -for name, col in palette.items(): - # Adjusted n_colors until `axe` accessibility did not complain about color contrast - bg_palette[name] = mpl_colors.to_hex(sns.light_palette(col, n_colors=7)[1]) # type: ignore - - -app = App( - app_ui, - server, - static_assets=str(www_dir), -) diff --git a/spaces/priyam314/Neural_Style_Texture/src/utils/video_utils.py b/spaces/priyam314/Neural_Style_Texture/src/utils/video_utils.py deleted file mode 100644 index 1499a03b09126cdb4dfd7430f8b7c0ee551ff175..0000000000000000000000000000000000000000 --- a/spaces/priyam314/Neural_Style_Texture/src/utils/video_utils.py +++ /dev/null @@ -1,38 +0,0 @@ -import os -import subprocess -import shutil - - -def create_video_from_intermediate_results(results_path): - # - # change this depending on what you want to accomplish (modify out video - # name, change fps and trim video) - # - img_format = (4, '.jpg') - out_file_name = 'out.mp4' - fps = 10 - first_frame = 0 - number_of_frames_to_process = len(os.listdir(results_path)) - ffmpeg = 'ffmpeg' - if shutil.which(ffmpeg): # if ffmpeg is in system path - # example: '%4d.png' for (4, '.png') - 
img_name_format = '%' + str(img_format[0]) + 'd' + img_format[1] - pattern = os.path.join(results_path, img_name_format) - out_video_path = os.path.join(results_path, out_file_name) - - trim_video_command = [ - '-start_number', - str(first_frame), '-vframes', - str(number_of_frames_to_process) - ] - input_options = ['-r', str(fps), '-i', pattern] - encoding_options = [ - '-c:v', 'libx264', '-crf', '25', '-pix_fmt', 'yuv420p', - '-vf', "pad=ceil(iw/2)*2:ceil(ih/2)*2" - ] - subprocess.call([ - ffmpeg, *input_options, *trim_video_command, *encoding_options, - out_video_path - ]) - else: - print(f'{ffmpeg} not found in the system path, aborting.') diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/designspaceLib/split.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/designspaceLib/split.py deleted file mode 100644 index 0b7cdf4be05dea1e810b4fddf4bf026bc1a50a85..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fontTools/designspaceLib/split.py +++ /dev/null @@ -1,475 +0,0 @@ -"""Allows building all the variable fonts of a DesignSpace version 5 by -splitting the document into interpolable sub-space, then into each VF. -""" - -from __future__ import annotations - -import itertools -import logging -import math -from typing import Any, Callable, Dict, Iterator, List, Tuple, cast - -from fontTools.designspaceLib import ( - AxisDescriptor, - AxisMappingDescriptor, - DesignSpaceDocument, - DiscreteAxisDescriptor, - InstanceDescriptor, - RuleDescriptor, - SimpleLocationDict, - SourceDescriptor, - VariableFontDescriptor, -) -from fontTools.designspaceLib.statNames import StatNames, getStatNames -from fontTools.designspaceLib.types import ( - ConditionSet, - Range, - Region, - getVFUserRegion, - locationInRegion, - regionInRegion, - userRegionToDesignRegion, -) - -LOGGER = logging.getLogger(__name__) - -MakeInstanceFilenameCallable = Callable[ - [DesignSpaceDocument, InstanceDescriptor, StatNames], str -] - - -def defaultMakeInstanceFilename( - doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames -) -> str: - """Default callable to synthesize an instance filename - when makeNames=True, for instances that don't specify an instance name - in the designspace. This part of the name generation can be overriden - because it's not specified by the STAT table. - """ - familyName = instance.familyName or statNames.familyNames.get("en") - styleName = instance.styleName or statNames.styleNames.get("en") - return f"{familyName}-{styleName}.ttf" - - -def splitInterpolable( - doc: DesignSpaceDocument, - makeNames: bool = True, - expandLocations: bool = True, - makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename, -) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]: - """Split the given DS5 into several interpolable sub-designspaces. - There are as many interpolable sub-spaces as there are combinations of - discrete axis values. - - E.g. with axes: - - italic (discrete) Upright or Italic - - style (discrete) Sans or Serif - - weight (continuous) 100 to 900 - - There are 4 sub-spaces in which the Weight axis should interpolate: - (Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif). - - The sub-designspaces still include the full axis definitions and STAT data, - but the rules, sources, variable fonts, instances are trimmed down to only - keep what falls within the interpolable sub-space. 
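A short sketch of how this splitting API is typically driven end to end, assuming a hypothetical `MyFamily.designspace` format 5 document on disk; the input and output file names are placeholders:

```python
from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.designspaceLib.split import splitInterpolable, splitVariableFonts

# "MyFamily.designspace" is a placeholder for any format 5 designspace file.
doc = DesignSpaceDocument.fromfile("MyFamily.designspace")

for discrete_location, sub_doc in splitInterpolable(doc):
    # One sub-document per combination of discrete axis values.
    for vf_name, vf_doc in splitVariableFonts(sub_doc):
        # Each yielded document describes a single variable font.
        vf_doc.write(f"{vf_name}.designspace")
```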
- - Args: - - ``makeNames``: Whether to compute the instance family and style - names using the STAT data. - - ``expandLocations``: Whether to turn all locations into "full" - locations, including implicit default axis values where missing. - - ``makeInstanceFilename``: Callable to synthesize an instance filename - when makeNames=True, for instances that don't specify an instance name - in the designspace. This part of the name generation can be overridden - because it's not specified by the STAT table. - - .. versionadded:: 5.0 - """ - discreteAxes = [] - interpolableUserRegion: Region = {} - for axis in doc.axes: - if hasattr(axis, "values"): - # Mypy doesn't support narrowing union types via hasattr() - # TODO(Python 3.10): use TypeGuard - # https://mypy.readthedocs.io/en/stable/type_narrowing.html - axis = cast(DiscreteAxisDescriptor, axis) - discreteAxes.append(axis) - else: - axis = cast(AxisDescriptor, axis) - interpolableUserRegion[axis.name] = Range( - axis.minimum, - axis.maximum, - axis.default, - ) - valueCombinations = itertools.product(*[axis.values for axis in discreteAxes]) - for values in valueCombinations: - discreteUserLocation = { - discreteAxis.name: value - for discreteAxis, value in zip(discreteAxes, values) - } - subDoc = _extractSubSpace( - doc, - {**interpolableUserRegion, **discreteUserLocation}, - keepVFs=True, - makeNames=makeNames, - expandLocations=expandLocations, - makeInstanceFilename=makeInstanceFilename, - ) - yield discreteUserLocation, subDoc - - -def splitVariableFonts( - doc: DesignSpaceDocument, - makeNames: bool = False, - expandLocations: bool = False, - makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename, -) -> Iterator[Tuple[str, DesignSpaceDocument]]: - """Convert each variable font listed in this document into a standalone - designspace. This can be used to compile all the variable fonts from a - format 5 designspace using tools that can only deal with 1 VF at a time. - - Args: - - ``makeNames``: Whether to compute the instance family and style - names using the STAT data. - - ``expandLocations``: Whether to turn all locations into "full" - locations, including implicit default axis values where missing. - - ``makeInstanceFilename``: Callable to synthesize an instance filename - when makeNames=True, for instances that don't specify an instance name - in the designspace. This part of the name generation can be overridden - because it's not specified by the STAT table. - - .. versionadded:: 5.0 - """ - # Make one DesignspaceDoc v5 for each variable font - for vf in doc.getVariableFonts(): - vfUserRegion = getVFUserRegion(doc, vf) - vfDoc = _extractSubSpace( - doc, - vfUserRegion, - keepVFs=False, - makeNames=makeNames, - expandLocations=expandLocations, - makeInstanceFilename=makeInstanceFilename, - ) - vfDoc.lib = {**vfDoc.lib, **vf.lib} - yield vf.name, vfDoc - - -def convert5to4( - doc: DesignSpaceDocument, -) -> Dict[str, DesignSpaceDocument]: - """Convert each variable font listed in this document into a standalone - format 4 designspace. This can be used to compile all the variable fonts - from a format 5 designspace using tools that only know about format 4. - - .. 
versionadded:: 5.0 - """ - vfs = {} - for _location, subDoc in splitInterpolable(doc): - for vfName, vfDoc in splitVariableFonts(subDoc): - vfDoc.formatVersion = "4.1" - vfs[vfName] = vfDoc - return vfs - - -def _extractSubSpace( - doc: DesignSpaceDocument, - userRegion: Region, - *, - keepVFs: bool, - makeNames: bool, - expandLocations: bool, - makeInstanceFilename: MakeInstanceFilenameCallable, -) -> DesignSpaceDocument: - subDoc = DesignSpaceDocument() - # Don't include STAT info - # FIXME: (Jany) let's think about it. Not include = OK because the point of - # the splitting is to build VFs and we'll use the STAT data of the full - # document to generate the STAT of the VFs, so "no need" to have STAT data - # in sub-docs. Counterpoint: what if someone wants to split this DS for - # other purposes? Maybe for that it would be useful to also subset the STAT - # data? - # subDoc.elidedFallbackName = doc.elidedFallbackName - - def maybeExpandDesignLocation(object): - if expandLocations: - return object.getFullDesignLocation(doc) - else: - return object.designLocation - - for axis in doc.axes: - range = userRegion[axis.name] - if isinstance(range, Range) and hasattr(axis, "minimum"): - # Mypy doesn't support narrowing union types via hasattr() - # TODO(Python 3.10): use TypeGuard - # https://mypy.readthedocs.io/en/stable/type_narrowing.html - axis = cast(AxisDescriptor, axis) - subDoc.addAxis( - AxisDescriptor( - # Same info - tag=axis.tag, - name=axis.name, - labelNames=axis.labelNames, - hidden=axis.hidden, - # Subset range - minimum=max(range.minimum, axis.minimum), - default=range.default or axis.default, - maximum=min(range.maximum, axis.maximum), - map=[ - (user, design) - for user, design in axis.map - if range.minimum <= user <= range.maximum - ], - # Don't include STAT info - axisOrdering=None, - axisLabels=None, - ) - ) - - subDoc.axisMappings = mappings = [] - subDocAxes = {axis.name for axis in subDoc.axes} - for mapping in doc.axisMappings: - if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()): - continue - if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()): - LOGGER.error( - "In axis mapping from input %s, some output axes are not in the variable-font: %s", - mapping.inputLocation, - mapping.outputLocation, - ) - continue - - mappingAxes = set() - mappingAxes.update(mapping.inputLocation.keys()) - mappingAxes.update(mapping.outputLocation.keys()) - for axis in doc.axes: - if axis.name not in mappingAxes: - continue - range = userRegion[axis.name] - if ( - range.minimum != axis.minimum - or (range.default is not None and range.default != axis.default) - or range.maximum != axis.maximum - ): - LOGGER.error( - "Limiting axis ranges used in elements not supported: %s", - axis.name, - ) - continue - - mappings.append( - AxisMappingDescriptor( - inputLocation=mapping.inputLocation, - outputLocation=mapping.outputLocation, - ) - ) - - # Don't include STAT info - # subDoc.locationLabels = doc.locationLabels - - # Rules: subset them based on conditions - designRegion = userRegionToDesignRegion(doc, userRegion) - subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion) - subDoc.rulesProcessingLast = doc.rulesProcessingLast - - # Sources: keep only the ones that fall within the kept axis ranges - for source in doc.sources: - if not locationInRegion(doc.map_backward(source.designLocation), userRegion): - continue - - subDoc.addSource( - SourceDescriptor( - filename=source.filename, - path=source.path, - font=source.font, - 
name=source.name, - designLocation=_filterLocation( - userRegion, maybeExpandDesignLocation(source) - ), - layerName=source.layerName, - familyName=source.familyName, - styleName=source.styleName, - muteKerning=source.muteKerning, - muteInfo=source.muteInfo, - mutedGlyphNames=source.mutedGlyphNames, - ) - ) - - # Copy family name translations from the old default source to the new default - vfDefault = subDoc.findDefault() - oldDefault = doc.findDefault() - if vfDefault is not None and oldDefault is not None: - vfDefault.localisedFamilyName = oldDefault.localisedFamilyName - - # Variable fonts: keep only the ones that fall within the kept axis ranges - if keepVFs: - # Note: call getVariableFont() to make the implicit VFs explicit - for vf in doc.getVariableFonts(): - vfUserRegion = getVFUserRegion(doc, vf) - if regionInRegion(vfUserRegion, userRegion): - subDoc.addVariableFont( - VariableFontDescriptor( - name=vf.name, - filename=vf.filename, - axisSubsets=[ - axisSubset - for axisSubset in vf.axisSubsets - if isinstance(userRegion[axisSubset.name], Range) - ], - lib=vf.lib, - ) - ) - - # Instances: same as Sources + compute missing names - for instance in doc.instances: - if not locationInRegion(instance.getFullUserLocation(doc), userRegion): - continue - - if makeNames: - statNames = getStatNames(doc, instance.getFullUserLocation(doc)) - familyName = instance.familyName or statNames.familyNames.get("en") - styleName = instance.styleName or statNames.styleNames.get("en") - subDoc.addInstance( - InstanceDescriptor( - filename=instance.filename - or makeInstanceFilename(doc, instance, statNames), - path=instance.path, - font=instance.font, - name=instance.name or f"{familyName} {styleName}", - userLocation={} if expandLocations else instance.userLocation, - designLocation=_filterLocation( - userRegion, maybeExpandDesignLocation(instance) - ), - familyName=familyName, - styleName=styleName, - postScriptFontName=instance.postScriptFontName - or statNames.postScriptFontName, - styleMapFamilyName=instance.styleMapFamilyName - or statNames.styleMapFamilyNames.get("en"), - styleMapStyleName=instance.styleMapStyleName - or statNames.styleMapStyleName, - localisedFamilyName=instance.localisedFamilyName - or statNames.familyNames, - localisedStyleName=instance.localisedStyleName - or statNames.styleNames, - localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName - or statNames.styleMapFamilyNames, - localisedStyleMapStyleName=instance.localisedStyleMapStyleName - or {}, - lib=instance.lib, - ) - ) - else: - subDoc.addInstance( - InstanceDescriptor( - filename=instance.filename, - path=instance.path, - font=instance.font, - name=instance.name, - userLocation={} if expandLocations else instance.userLocation, - designLocation=_filterLocation( - userRegion, maybeExpandDesignLocation(instance) - ), - familyName=instance.familyName, - styleName=instance.styleName, - postScriptFontName=instance.postScriptFontName, - styleMapFamilyName=instance.styleMapFamilyName, - styleMapStyleName=instance.styleMapStyleName, - localisedFamilyName=instance.localisedFamilyName, - localisedStyleName=instance.localisedStyleName, - localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName, - localisedStyleMapStyleName=instance.localisedStyleMapStyleName, - lib=instance.lib, - ) - ) - - subDoc.lib = doc.lib - - return subDoc - - -def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet: - c: Dict[str, Range] = {} - for condition in conditionSet: - minimum, maximum = condition.get("minimum"), 
condition.get("maximum") - c[condition["name"]] = Range( - minimum if minimum is not None else -math.inf, - maximum if maximum is not None else math.inf, - ) - return c - - -def _subsetRulesBasedOnConditions( - rules: List[RuleDescriptor], designRegion: Region -) -> List[RuleDescriptor]: - # What rules to keep: - # - Keep the rule if any conditionset is relevant. - # - A conditionset is relevant if all conditions are relevant or it is empty. - # - A condition is relevant if - # - axis is point (C-AP), - # - and point in condition's range (C-AP-in) - # (in this case remove the condition because it's always true) - # - else (C-AP-out) whole conditionset can be discarded (condition false - # => conditionset false) - # - axis is range (C-AR), - # - (C-AR-all) and axis range fully contained in condition range: we can - # scrap the condition because it's always true - # - (C-AR-inter) and intersection(axis range, condition range) not empty: - # keep the condition with the smaller range (= intersection) - # - (C-AR-none) else, whole conditionset can be discarded - newRules: List[RuleDescriptor] = [] - for rule in rules: - newRule: RuleDescriptor = RuleDescriptor( - name=rule.name, conditionSets=[], subs=rule.subs - ) - for conditionset in rule.conditionSets: - cs = _conditionSetFrom(conditionset) - newConditionset: List[Dict[str, Any]] = [] - discardConditionset = False - for selectionName, selectionValue in designRegion.items(): - # TODO: Ensure that all(key in conditionset for key in region.keys())? - if selectionName not in cs: - # raise Exception("Selection has different axes than the rules") - continue - if isinstance(selectionValue, (float, int)): # is point - # Case C-AP-in - if selectionValue in cs[selectionName]: - pass # always matches, conditionset can stay empty for this one. - # Case C-AP-out - else: - discardConditionset = True - else: # is range - # Case C-AR-all - if selectionValue in cs[selectionName]: - pass # always matches, conditionset can stay empty for this one. - else: - intersection = cs[selectionName].intersection(selectionValue) - # Case C-AR-inter - if intersection is not None: - newConditionset.append( - { - "name": selectionName, - "minimum": intersection.minimum, - "maximum": intersection.maximum, - } - ) - # Case C-AR-none - else: - discardConditionset = True - if not discardConditionset: - newRule.conditionSets.append(newConditionset) - if newRule.conditionSets: - newRules.append(newRule) - - return newRules - - -def _filterLocation( - userRegion: Region, - location: Dict[str, float], -) -> Dict[str, float]: - return { - name: value - for name, value in location.items() - if name in userRegion and isinstance(userRegion[name], Range) - } diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/getlimits.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/getlimits.py deleted file mode 100644 index 13414c2a64d688aa96c9cece79bc187210e19589..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/core/getlimits.py +++ /dev/null @@ -1,735 +0,0 @@ -"""Machine limits for Float32 and Float64 and (long double) if available... - -""" -__all__ = ['finfo', 'iinfo'] - -import warnings - -from .._utils import set_module -from ._machar import MachAr -from . import numeric -from . 
import numerictypes as ntypes -from .numeric import array, inf, NaN -from .umath import log10, exp2, nextafter, isnan - - -def _fr0(a): - """fix rank-0 --> rank-1""" - if a.ndim == 0: - a = a.copy() - a.shape = (1,) - return a - - -def _fr1(a): - """fix rank > 0 --> rank-0""" - if a.size == 1: - a = a.copy() - a.shape = () - return a - - -class MachArLike: - """ Object to simulate MachAr instance """ - def __init__(self, ftype, *, eps, epsneg, huge, tiny, - ibeta, smallest_subnormal=None, **kwargs): - self.params = _MACHAR_PARAMS[ftype] - self.ftype = ftype - self.title = self.params['title'] - # Parameter types same as for discovered MachAr object. - if not smallest_subnormal: - self._smallest_subnormal = nextafter( - self.ftype(0), self.ftype(1), dtype=self.ftype) - else: - self._smallest_subnormal = smallest_subnormal - self.epsilon = self.eps = self._float_to_float(eps) - self.epsneg = self._float_to_float(epsneg) - self.xmax = self.huge = self._float_to_float(huge) - self.xmin = self._float_to_float(tiny) - self.smallest_normal = self.tiny = self._float_to_float(tiny) - self.ibeta = self.params['itype'](ibeta) - self.__dict__.update(kwargs) - self.precision = int(-log10(self.eps)) - self.resolution = self._float_to_float( - self._float_conv(10) ** (-self.precision)) - self._str_eps = self._float_to_str(self.eps) - self._str_epsneg = self._float_to_str(self.epsneg) - self._str_xmin = self._float_to_str(self.xmin) - self._str_xmax = self._float_to_str(self.xmax) - self._str_resolution = self._float_to_str(self.resolution) - self._str_smallest_normal = self._float_to_str(self.xmin) - - @property - def smallest_subnormal(self): - """Return the value for the smallest subnormal. - - Returns - ------- - smallest_subnormal : float - value for the smallest subnormal. - - Warns - ----- - UserWarning - If the calculated value for the smallest subnormal is zero. - """ - # Check that the calculated value is not zero, in case it raises a - # warning. - value = self._smallest_subnormal - if self.ftype(0) == value: - warnings.warn( - 'The value of the smallest subnormal for {} type ' - 'is zero.'.format(self.ftype), UserWarning, stacklevel=2) - - return self._float_to_float(value) - - @property - def _str_smallest_subnormal(self): - """Return the string representation of the smallest subnormal.""" - return self._float_to_str(self.smallest_subnormal) - - def _float_to_float(self, value): - """Converts float to float. - - Parameters - ---------- - value : float - value to be converted. - """ - return _fr1(self._float_conv(value)) - - def _float_conv(self, value): - """Converts float to conv. - - Parameters - ---------- - value : float - value to be converted. - """ - return array([value], self.ftype) - - def _float_to_str(self, value): - """Converts float to str. - - Parameters - ---------- - value : float - value to be converted. 
- """ - return self.params['fmt'] % array(_fr0(value)[0], self.ftype) - - -_convert_to_float = { - ntypes.csingle: ntypes.single, - ntypes.complex_: ntypes.float_, - ntypes.clongfloat: ntypes.longfloat - } - -# Parameters for creating MachAr / MachAr-like objects -_title_fmt = 'numpy {} precision floating point number' -_MACHAR_PARAMS = { - ntypes.double: dict( - itype = ntypes.int64, - fmt = '%24.16e', - title = _title_fmt.format('double')), - ntypes.single: dict( - itype = ntypes.int32, - fmt = '%15.7e', - title = _title_fmt.format('single')), - ntypes.longdouble: dict( - itype = ntypes.longlong, - fmt = '%s', - title = _title_fmt.format('long double')), - ntypes.half: dict( - itype = ntypes.int16, - fmt = '%12.5e', - title = _title_fmt.format('half'))} - -# Key to identify the floating point type. Key is result of -# ftype('-0.1').newbyteorder('<').tobytes() -# -# 20230201 - use (ftype(-1.0) / ftype(10.0)).newbyteorder('<').tobytes() -# instead because stold may have deficiencies on some platforms. -# See: -# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure - -_KNOWN_TYPES = {} -def _register_type(machar, bytepat): - _KNOWN_TYPES[bytepat] = machar -_float_ma = {} - - -def _register_known_types(): - # Known parameters for float16 - # See docstring of MachAr class for description of parameters. - f16 = ntypes.float16 - float16_ma = MachArLike(f16, - machep=-10, - negep=-11, - minexp=-14, - maxexp=16, - it=10, - iexp=5, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f16(-10)), - epsneg=exp2(f16(-11)), - huge=f16(65504), - tiny=f16(2 ** -14)) - _register_type(float16_ma, b'f\xae') - _float_ma[16] = float16_ma - - # Known parameters for float32 - f32 = ntypes.float32 - float32_ma = MachArLike(f32, - machep=-23, - negep=-24, - minexp=-126, - maxexp=128, - it=23, - iexp=8, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(f32(-23)), - epsneg=exp2(f32(-24)), - huge=f32((1 - 2 ** -24) * 2**128), - tiny=exp2(f32(-126))) - _register_type(float32_ma, b'\xcd\xcc\xcc\xbd') - _float_ma[32] = float32_ma - - # Known parameters for float64 - f64 = ntypes.float64 - epsneg_f64 = 2.0 ** -53.0 - tiny_f64 = 2.0 ** -1022.0 - float64_ma = MachArLike(f64, - machep=-52, - negep=-53, - minexp=-1022, - maxexp=1024, - it=52, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=2.0 ** -52.0, - epsneg=epsneg_f64, - huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4), - tiny=tiny_f64) - _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf') - _float_ma[64] = float64_ma - - # Known parameters for IEEE 754 128-bit binary float - ld = ntypes.longdouble - epsneg_f128 = exp2(ld(-113)) - tiny_f128 = exp2(ld(-16382)) - # Ignore runtime error when this is not f128 - with numeric.errstate(all='ignore'): - huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4) - float128_ma = MachArLike(ld, - machep=-112, - negep=-113, - minexp=-16382, - maxexp=16384, - it=112, - iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-112)), - epsneg=epsneg_f128, - huge=huge_f128, - tiny=tiny_f128) - # IEEE 754 128-bit binary float - _register_type(float128_ma, - b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf') - _float_ma[128] = float128_ma - - # Known parameters for float80 (Intel 80-bit extended precision) - epsneg_f80 = exp2(ld(-64)) - tiny_f80 = exp2(ld(-16382)) - # Ignore runtime error when this is not f80 - with numeric.errstate(all='ignore'): - huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4) - float80_ma = MachArLike(ld, - machep=-63, - negep=-64, - minexp=-16382, - maxexp=16384, - it=63, - 
iexp=15, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-63)), - epsneg=epsneg_f80, - huge=huge_f80, - tiny=tiny_f80) - # float80, first 10 bytes containing actual storage - _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf') - _float_ma[80] = float80_ma - - # Guessed / known parameters for double double; see: - # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic - # These numbers have the same exponent range as float64, but extended number of - # digits in the significand. - huge_dd = nextafter(ld(inf), ld(0), dtype=ld) - # As the smallest_normal in double double is so hard to calculate we set - # it to NaN. - smallest_normal_dd = NaN - # Leave the same value for the smallest subnormal as double - smallest_subnormal_dd = ld(nextafter(0., 1.)) - float_dd_ma = MachArLike(ld, - machep=-105, - negep=-106, - minexp=-1022, - maxexp=1024, - it=105, - iexp=11, - ibeta=2, - irnd=5, - ngrd=0, - eps=exp2(ld(-105)), - epsneg=exp2(ld(-106)), - huge=huge_dd, - tiny=smallest_normal_dd, - smallest_subnormal=smallest_subnormal_dd) - # double double; low, high order (e.g. PPC 64) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf') - # double double; high, low order (e.g. PPC 64 le) - _register_type(float_dd_ma, - b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<') - _float_ma['dd'] = float_dd_ma - - -def _get_machar(ftype): - """ Get MachAr instance or MachAr-like instance - - Get parameters for floating point type, by first trying signatures of - various known floating point types, then, if none match, attempting to - identify parameters by analysis. - - Parameters - ---------- - ftype : class - Numpy floating point type class (e.g. ``np.float64``) - - Returns - ------- - ma_like : instance of :class:`MachAr` or :class:`MachArLike` - Object giving floating point parameters for `ftype`. - - Warns - ----- - UserWarning - If the binary signature of the float type is not in the dictionary of - known float types. - """ - params = _MACHAR_PARAMS.get(ftype) - if params is None: - raise ValueError(repr(ftype)) - # Detect known / suspected types - # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold - # may be deficient - key = (ftype(-1.0) / ftype(10.)).newbyteorder('<').tobytes() - ma_like = None - if ftype == ntypes.longdouble: - # Could be 80 bit == 10 byte extended precision, where last bytes can - # be random garbage. - # Comparing first 10 bytes to pattern first to avoid branching on the - # random garbage. - ma_like = _KNOWN_TYPES.get(key[:10]) - if ma_like is None: - # see if the full key is known. - ma_like = _KNOWN_TYPES.get(key) - if ma_like is None and len(key) == 16: - # machine limits could be f80 masquerading as np.float128, - # find all keys with length 16 and make new dict, but make the keys - # only 10 bytes long, the last bytes can be random garbage - _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16} - ma_like = _kt.get(key[:10]) - if ma_like is not None: - return ma_like - # Fall back to parameter discovery - warnings.warn( - f'Signature {key} for {ftype} does not match any known type: ' - 'falling back to type probe function.\n' - 'This warnings indicates broken support for the dtype!', - UserWarning, stacklevel=2) - return _discovered_machar(ftype) - - -def _discovered_machar(ftype): - """ Create MachAr instance with found information on float types - - TODO: MachAr should be retired completely ideally. 
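For orientation, the parameters registered above are what `numpy.finfo` ultimately reports; a minimal check, with expected values taken from the registrations above:

```python
import numpy as np

# The registered float parameters surface through np.finfo.
print(np.finfo(np.float32).eps)   # 2**-23  ~= 1.1920929e-07
print(np.finfo(np.float32).tiny)  # 2**-126 ~= 1.1754944e-38
print(np.finfo(np.float64).eps)   # 2**-52  ~= 2.220446049250313e-16
print(np.finfo(np.float64).max)   # ~1.7976931348623157e+308
```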
We currently only - ever use it system with broken longdouble (valgrind, WSL). - """ - params = _MACHAR_PARAMS[ftype] - return MachAr(lambda v: array([v], ftype), - lambda v:_fr0(v.astype(params['itype']))[0], - lambda v:array(_fr0(v)[0], ftype), - lambda v: params['fmt'] % array(_fr0(v)[0], ftype), - params['title']) - - -@set_module('numpy') -class finfo: - """ - finfo(dtype) - - Machine limits for floating point types. - - Attributes - ---------- - bits : int - The number of bits occupied by the type. - dtype : dtype - Returns the dtype for which `finfo` returns information. For complex - input, the returned dtype is the associated ``float*`` dtype for its - real and complex components. - eps : float - The difference between 1.0 and the next smallest representable float - larger than 1.0. For example, for 64-bit binary floats in the IEEE-754 - standard, ``eps = 2**-52``, approximately 2.22e-16. - epsneg : float - The difference between 1.0 and the next smallest representable float - less than 1.0. For example, for 64-bit binary floats in the IEEE-754 - standard, ``epsneg = 2**-53``, approximately 1.11e-16. - iexp : int - The number of bits in the exponent portion of the floating point - representation. - machep : int - The exponent that yields `eps`. - max : floating point number of the appropriate type - The largest representable number. - maxexp : int - The smallest positive power of the base (2) that causes overflow. - min : floating point number of the appropriate type - The smallest representable number, typically ``-max``. - minexp : int - The most negative power of the base (2) consistent with there - being no leading 0's in the mantissa. - negep : int - The exponent that yields `epsneg`. - nexp : int - The number of bits in the exponent including its sign and bias. - nmant : int - The number of bits in the mantissa. - precision : int - The approximate number of decimal digits to which this kind of - float is precise. - resolution : floating point number of the appropriate type - The approximate decimal resolution of this type, i.e., - ``10**-precision``. - tiny : float - An alias for `smallest_normal`, kept for backwards compatibility. - smallest_normal : float - The smallest positive floating point number with 1 as leading bit in - the mantissa following IEEE-754 (see Notes). - smallest_subnormal : float - The smallest positive floating point number with 0 as leading bit in - the mantissa following IEEE-754. - - Parameters - ---------- - dtype : float, dtype, or instance - Kind of floating point or complex floating point - data-type about which to get information. - - See Also - -------- - iinfo : The equivalent for integer data types. - spacing : The distance between a value and the nearest adjacent number - nextafter : The next floating point value after x1 towards x2 - - Notes - ----- - For developers of NumPy: do not instantiate this at the module level. - The initial calculation of these parameters is expensive and negatively - impacts import times. These objects are cached, so calling ``finfo()`` - repeatedly inside your functions is not a problem. - - Note that ``smallest_normal`` is not actually the smallest positive - representable value in a NumPy floating point type. As in the IEEE-754 - standard [1]_, NumPy floating point types make use of subnormal numbers to - fill the gap between 0 and ``smallest_normal``. However, subnormal numbers - may have significantly reduced precision [2]_. - - This function can also be used for complex data types as well. 
If used, - the output will be the same as the corresponding real float type - (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)). - However, the output is true for the real and imaginary components. - - References - ---------- - .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008, - pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935 - .. [2] Wikipedia, "Denormal Numbers", - https://en.wikipedia.org/wiki/Denormal_number - - Examples - -------- - >>> np.finfo(np.float64).dtype - dtype('float64') - >>> np.finfo(np.complex64).dtype - dtype('float32') - - """ - - _finfo_cache = {} - - def __new__(cls, dtype): - try: - obj = cls._finfo_cache.get(dtype) # most common path - if obj is not None: - return obj - except TypeError: - pass - - if dtype is None: - # Deprecated in NumPy 1.25, 2023-01-16 - warnings.warn( - "finfo() dtype cannot be None. This behavior will " - "raise an error in the future. (Deprecated in NumPy 1.25)", - DeprecationWarning, - stacklevel=2 - ) - - try: - dtype = numeric.dtype(dtype) - except TypeError: - # In case a float instance was given - dtype = numeric.dtype(type(dtype)) - - obj = cls._finfo_cache.get(dtype) - if obj is not None: - return obj - dtypes = [dtype] - newdtype = numeric.obj2sctype(dtype) - if newdtype is not dtype: - dtypes.append(newdtype) - dtype = newdtype - if not issubclass(dtype, numeric.inexact): - raise ValueError("data type %r not inexact" % (dtype)) - obj = cls._finfo_cache.get(dtype) - if obj is not None: - return obj - if not issubclass(dtype, numeric.floating): - newdtype = _convert_to_float[dtype] - if newdtype is not dtype: - # dtype changed, for example from complex128 to float64 - dtypes.append(newdtype) - dtype = newdtype - - obj = cls._finfo_cache.get(dtype, None) - if obj is not None: - # the original dtype was not in the cache, but the new - # dtype is in the cache. 
we add the original dtypes to - # the cache and return the result - for dt in dtypes: - cls._finfo_cache[dt] = obj - return obj - obj = object.__new__(cls)._init(dtype) - for dt in dtypes: - cls._finfo_cache[dt] = obj - return obj - - def _init(self, dtype): - self.dtype = numeric.dtype(dtype) - machar = _get_machar(dtype) - - for word in ['precision', 'iexp', - 'maxexp', 'minexp', 'negep', - 'machep']: - setattr(self, word, getattr(machar, word)) - for word in ['resolution', 'epsneg', 'smallest_subnormal']: - setattr(self, word, getattr(machar, word).flat[0]) - self.bits = self.dtype.itemsize * 8 - self.max = machar.huge.flat[0] - self.min = -self.max - self.eps = machar.eps.flat[0] - self.nexp = machar.iexp - self.nmant = machar.it - self._machar = machar - self._str_tiny = machar._str_xmin.strip() - self._str_max = machar._str_xmax.strip() - self._str_epsneg = machar._str_epsneg.strip() - self._str_eps = machar._str_eps.strip() - self._str_resolution = machar._str_resolution.strip() - self._str_smallest_normal = machar._str_smallest_normal.strip() - self._str_smallest_subnormal = machar._str_smallest_subnormal.strip() - return self - - def __str__(self): - fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'precision = %(precision)3s resolution = %(_str_resolution)s\n' - 'machep = %(machep)6s eps = %(_str_eps)s\n' - 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n' - 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n' - 'maxexp = %(maxexp)6s max = %(_str_max)s\n' - 'nexp = %(nexp)6s min = -max\n' - 'smallest_normal = %(_str_smallest_normal)s ' - 'smallest_subnormal = %(_str_smallest_subnormal)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % self.__dict__ - - def __repr__(self): - c = self.__class__.__name__ - d = self.__dict__.copy() - d['klass'] = c - return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," - " max=%(_str_max)s, dtype=%(dtype)s)") % d) - - @property - def smallest_normal(self): - """Return the value for the smallest normal. - - Returns - ------- - smallest_normal : float - Value for the smallest normal. - - Warns - ----- - UserWarning - If the calculated value for the smallest normal is requested for - double-double. - """ - # This check is necessary because the value for smallest_normal is - # platform dependent for longdouble types. - if isnan(self._machar.smallest_normal.flat[0]): - warnings.warn( - 'The value of smallest normal is undefined for double double', - UserWarning, stacklevel=2) - return self._machar.smallest_normal.flat[0] - - @property - def tiny(self): - """Return the value for tiny, alias of smallest_normal. - - Returns - ------- - tiny : float - Value for the smallest normal, alias of smallest_normal. - - Warns - ----- - UserWarning - If the calculated value for the smallest normal is requested for - double-double. - """ - return self.smallest_normal - - -@set_module('numpy') -class iinfo: - """ - iinfo(type) - - Machine limits for integer types. - - Attributes - ---------- - bits : int - The number of bits occupied by the type. - dtype : dtype - Returns the dtype for which `iinfo` returns information. - min : int - The smallest integer expressible by the type. - max : int - The largest integer expressible by the type. - - Parameters - ---------- - int_type : integer type, dtype, or instance - The kind of integer data type to get information about. - - See Also - -------- - finfo : The equivalent for floating point data types. 
- - Examples - -------- - With types: - - >>> ii16 = np.iinfo(np.int16) - >>> ii16.min - -32768 - >>> ii16.max - 32767 - >>> ii32 = np.iinfo(np.int32) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - With instances: - - >>> ii32 = np.iinfo(np.int32(10)) - >>> ii32.min - -2147483648 - >>> ii32.max - 2147483647 - - """ - - _min_vals = {} - _max_vals = {} - - def __init__(self, int_type): - try: - self.dtype = numeric.dtype(int_type) - except TypeError: - self.dtype = numeric.dtype(type(int_type)) - self.kind = self.dtype.kind - self.bits = self.dtype.itemsize * 8 - self.key = "%s%d" % (self.kind, self.bits) - if self.kind not in 'iu': - raise ValueError("Invalid integer data type %r." % (self.kind,)) - - @property - def min(self): - """Minimum value of given dtype.""" - if self.kind == 'u': - return 0 - else: - try: - val = iinfo._min_vals[self.key] - except KeyError: - val = int(-(1 << (self.bits-1))) - iinfo._min_vals[self.key] = val - return val - - @property - def max(self): - """Maximum value of given dtype.""" - try: - val = iinfo._max_vals[self.key] - except KeyError: - if self.kind == 'u': - val = int((1 << self.bits) - 1) - else: - val = int((1 << (self.bits-1)) - 1) - iinfo._max_vals[self.key] = val - return val - - def __str__(self): - """String representation.""" - fmt = ( - 'Machine parameters for %(dtype)s\n' - '---------------------------------------------------------------\n' - 'min = %(min)s\n' - 'max = %(max)s\n' - '---------------------------------------------------------------\n' - ) - return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max} - - def __repr__(self): - return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__, - self.min, self.max, self.dtype) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.py deleted file mode 100644 index f3b12aa22bab026675512e5b84b81dd6e7a2da7c..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/indexes/timedeltas/test_join.py +++ /dev/null @@ -1,55 +0,0 @@ -import numpy as np - -from pandas import ( - Index, - Timedelta, - timedelta_range, -) -import pandas._testing as tm - - -class TestJoin: - def test_append_join_nondatetimeindex(self): - rng = timedelta_range("1 days", periods=10) - idx = Index(["a", "b", "c", "d"]) - - result = rng.append(idx) - assert isinstance(result[0], Timedelta) - - # it works - rng.join(idx, how="outer") - - def test_join_self(self, join_type): - index = timedelta_range("1 day", periods=10) - joined = index.join(index, how=join_type) - tm.assert_index_equal(index, joined) - - def test_does_not_convert_mixed_integer(self): - df = tm.makeCustomDataframe( - 10, - 10, - data_gen_f=lambda *args, **kwargs: np.random.default_rng( - 2 - ).standard_normal(), - r_idx_type="i", - c_idx_type="td", - ) - str(df) - - cols = df.columns.join(df.index, how="outer") - joined = cols.join(df.columns) - assert cols.dtype == np.dtype("O") - assert cols.dtype == joined.dtype - tm.assert_index_equal(cols, joined) - - def test_join_preserves_freq(self): - # GH#32157 - tdi = timedelta_range("1 day", periods=10) - result = tdi[:5].join(tdi[5:], how="outer") - assert result.freq == tdi.freq - tm.assert_index_equal(result, tdi) - - result = tdi[:5].join(tdi[6:], how="outer") - assert result.freq is None - expected = tdi.delete(5) - 
tm.assert_index_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/pytables/test_round_trip.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/pytables/test_round_trip.py deleted file mode 100644 index 085db5f521a9f023a746a496d4bbaf225ad15412..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/io/pytables/test_round_trip.py +++ /dev/null @@ -1,528 +0,0 @@ -import datetime -import re - -import numpy as np -import pytest - -from pandas._libs.tslibs import Timestamp -from pandas.compat import is_platform_windows - -import pandas as pd -from pandas import ( - DataFrame, - Index, - Series, - _testing as tm, - bdate_range, - read_hdf, -) -from pandas.tests.io.pytables.common import ( - _maybe_remove, - ensure_clean_store, -) -from pandas.util import _test_decorators as td - -pytestmark = pytest.mark.single_cpu - - -def test_conv_read_write(): - with tm.ensure_clean() as path: - - def roundtrip(key, obj, **kwargs): - obj.to_hdf(path, key, **kwargs) - return read_hdf(path, key) - - o = tm.makeTimeSeries() - tm.assert_series_equal(o, roundtrip("series", o)) - - o = tm.makeStringSeries() - tm.assert_series_equal(o, roundtrip("string_series", o)) - - o = tm.makeDataFrame() - tm.assert_frame_equal(o, roundtrip("frame", o)) - - # table - df = DataFrame({"A": range(5), "B": range(5)}) - df.to_hdf(path, "table", append=True) - result = read_hdf(path, "table", where=["index>2"]) - tm.assert_frame_equal(df[df.index > 2], result) - - -def test_long_strings(setup_path): - # GH6166 - df = DataFrame({"a": tm.makeStringIndex(10)}, index=tm.makeStringIndex(10)) - - with ensure_clean_store(setup_path) as store: - store.append("df", df, data_columns=["a"]) - - result = store.select("df") - tm.assert_frame_equal(df, result) - - -def test_api(tmp_path, setup_path): - # GH4584 - # API issue when to_hdf doesn't accept append AND format args - path = tmp_path / setup_path - - df = tm.makeDataFrame() - df.iloc[:10].to_hdf(path, "df", append=True, format="table") - df.iloc[10:].to_hdf(path, "df", append=True, format="table") - tm.assert_frame_equal(read_hdf(path, "df"), df) - - # append to False - df.iloc[:10].to_hdf(path, "df", append=False, format="table") - df.iloc[10:].to_hdf(path, "df", append=True, format="table") - tm.assert_frame_equal(read_hdf(path, "df"), df) - - -def test_api_append(tmp_path, setup_path): - path = tmp_path / setup_path - - df = tm.makeDataFrame() - df.iloc[:10].to_hdf(path, "df", append=True) - df.iloc[10:].to_hdf(path, "df", append=True, format="table") - tm.assert_frame_equal(read_hdf(path, "df"), df) - - # append to False - df.iloc[:10].to_hdf(path, "df", append=False, format="table") - df.iloc[10:].to_hdf(path, "df", append=True) - tm.assert_frame_equal(read_hdf(path, "df"), df) - - -def test_api_2(tmp_path, setup_path): - path = tmp_path / setup_path - - df = tm.makeDataFrame() - df.to_hdf(path, "df", append=False, format="fixed") - tm.assert_frame_equal(read_hdf(path, "df"), df) - - df.to_hdf(path, "df", append=False, format="f") - tm.assert_frame_equal(read_hdf(path, "df"), df) - - df.to_hdf(path, "df", append=False) - tm.assert_frame_equal(read_hdf(path, "df"), df) - - df.to_hdf(path, "df") - tm.assert_frame_equal(read_hdf(path, "df"), df) - - with ensure_clean_store(setup_path) as store: - df = tm.makeDataFrame() - - _maybe_remove(store, "df") - store.append("df", df.iloc[:10], append=True, format="table") - 
store.append("df", df.iloc[10:], append=True, format="table") - tm.assert_frame_equal(store.select("df"), df) - - # append to False - _maybe_remove(store, "df") - store.append("df", df.iloc[:10], append=False, format="table") - store.append("df", df.iloc[10:], append=True, format="table") - tm.assert_frame_equal(store.select("df"), df) - - # formats - _maybe_remove(store, "df") - store.append("df", df.iloc[:10], append=False, format="table") - store.append("df", df.iloc[10:], append=True, format="table") - tm.assert_frame_equal(store.select("df"), df) - - _maybe_remove(store, "df") - store.append("df", df.iloc[:10], append=False, format="table") - store.append("df", df.iloc[10:], append=True, format=None) - tm.assert_frame_equal(store.select("df"), df) - - -def test_api_invalid(tmp_path, setup_path): - path = tmp_path / setup_path - # Invalid. - df = tm.makeDataFrame() - - msg = "Can only append to Tables" - - with pytest.raises(ValueError, match=msg): - df.to_hdf(path, "df", append=True, format="f") - - with pytest.raises(ValueError, match=msg): - df.to_hdf(path, "df", append=True, format="fixed") - - msg = r"invalid HDFStore format specified \[foo\]" - - with pytest.raises(TypeError, match=msg): - df.to_hdf(path, "df", append=True, format="foo") - - with pytest.raises(TypeError, match=msg): - df.to_hdf(path, "df", append=False, format="foo") - - # File path doesn't exist - path = "" - msg = f"File {path} does not exist" - - with pytest.raises(FileNotFoundError, match=msg): - read_hdf(path, "df") - - -def test_get(setup_path): - with ensure_clean_store(setup_path) as store: - store["a"] = tm.makeTimeSeries() - left = store.get("a") - right = store["a"] - tm.assert_series_equal(left, right) - - left = store.get("/a") - right = store["/a"] - tm.assert_series_equal(left, right) - - with pytest.raises(KeyError, match="'No object named b in the file'"): - store.get("b") - - -def test_put_integer(setup_path): - # non-date, non-string index - df = DataFrame(np.random.default_rng(2).standard_normal((50, 100))) - _check_roundtrip(df, tm.assert_frame_equal, setup_path) - - -def test_table_values_dtypes_roundtrip(setup_path): - with ensure_clean_store(setup_path) as store: - df1 = DataFrame({"a": [1, 2, 3]}, dtype="f8") - store.append("df_f8", df1) - tm.assert_series_equal(df1.dtypes, store["df_f8"].dtypes) - - df2 = DataFrame({"a": [1, 2, 3]}, dtype="i8") - store.append("df_i8", df2) - tm.assert_series_equal(df2.dtypes, store["df_i8"].dtypes) - - # incompatible dtype - msg = re.escape( - "invalid combination of [values_axes] on appending data " - "[name->values_block_0,cname->values_block_0," - "dtype->float64,kind->float,shape->(1, 3)] vs " - "current table [name->values_block_0," - "cname->values_block_0,dtype->int64,kind->integer," - "shape->None]" - ) - with pytest.raises(ValueError, match=msg): - store.append("df_i8", df1) - - # check creation/storage/retrieval of float32 (a bit hacky to - # actually create them thought) - df1 = DataFrame(np.array([[1], [2], [3]], dtype="f4"), columns=["A"]) - store.append("df_f4", df1) - tm.assert_series_equal(df1.dtypes, store["df_f4"].dtypes) - assert df1.dtypes.iloc[0] == "float32" - - # check with mixed dtypes - df1 = DataFrame( - { - c: Series(np.random.default_rng(2).integers(5), dtype=c) - for c in ["float32", "float64", "int32", "int64", "int16", "int8"] - } - ) - df1["string"] = "foo" - df1["float322"] = 1.0 - df1["float322"] = df1["float322"].astype("float32") - df1["bool"] = df1["float32"] > 0 - df1["time1"] = Timestamp("20130101") - df1["time2"] = 
Timestamp("20130102") - - store.append("df_mixed_dtypes1", df1) - result = store.select("df_mixed_dtypes1").dtypes.value_counts() - result.index = [str(i) for i in result.index] - expected = Series( - { - "float32": 2, - "float64": 1, - "int32": 1, - "bool": 1, - "int16": 1, - "int8": 1, - "int64": 1, - "object": 1, - "datetime64[ns]": 2, - }, - name="count", - ) - result = result.sort_index() - expected = expected.sort_index() - tm.assert_series_equal(result, expected) - - -@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") -def test_series(setup_path): - s = tm.makeStringSeries() - _check_roundtrip(s, tm.assert_series_equal, path=setup_path) - - ts = tm.makeTimeSeries() - _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) - - ts2 = Series(ts.index, Index(ts.index, dtype=object)) - _check_roundtrip(ts2, tm.assert_series_equal, path=setup_path) - - ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object)) - _check_roundtrip( - ts3, tm.assert_series_equal, path=setup_path, check_index_type=False - ) - - -def test_float_index(setup_path): - # GH #454 - index = np.random.default_rng(2).standard_normal(10) - s = Series(np.random.default_rng(2).standard_normal(10), index=index) - _check_roundtrip(s, tm.assert_series_equal, path=setup_path) - - -def test_tuple_index(setup_path): - # GH #492 - col = np.arange(10) - idx = [(0.0, 1.0), (2.0, 3.0), (4.0, 5.0)] - data = np.random.default_rng(2).standard_normal(30).reshape((3, 10)) - DF = DataFrame(data, index=idx, columns=col) - - with tm.assert_produces_warning(pd.errors.PerformanceWarning): - _check_roundtrip(DF, tm.assert_frame_equal, path=setup_path) - - -@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") -def test_index_types(setup_path): - values = np.random.default_rng(2).standard_normal(2) - - func = lambda lhs, rhs: tm.assert_series_equal(lhs, rhs, check_index_type=True) - - ser = Series(values, [0, "y"]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [datetime.datetime.today(), 0]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, ["y", 0]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [datetime.date.today(), "a"]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [0, "y"]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [datetime.datetime.today(), 0]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, ["y", 0]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [datetime.date.today(), "a"]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [1.23, "b"]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [1, 1.53]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [1, 5]) - _check_roundtrip(ser, func, path=setup_path) - - ser = Series(values, [datetime.datetime(2012, 1, 1), datetime.datetime(2012, 1, 2)]) - _check_roundtrip(ser, func, path=setup_path) - - -def test_timeseries_preepoch(setup_path, request): - dr = bdate_range("1/1/1940", "1/1/1960") - ts = Series(np.random.default_rng(2).standard_normal(len(dr)), index=dr) - try: - _check_roundtrip(ts, tm.assert_series_equal, path=setup_path) - except OverflowError: - if is_platform_windows(): - request.node.add_marker( - pytest.mark.xfail("known failure on some windows platforms") - ) - raise - - -@pytest.mark.parametrize( - "compression", [False, pytest.param(True, marks=td.skip_if_windows)] -) 
-def test_frame(compression, setup_path): - df = tm.makeDataFrame() - - # put in some random NAs - df.iloc[0, 0] = np.nan - df.iloc[5, 3] = np.nan - - _check_roundtrip_table( - df, tm.assert_frame_equal, path=setup_path, compression=compression - ) - _check_roundtrip( - df, tm.assert_frame_equal, path=setup_path, compression=compression - ) - - tdf = tm.makeTimeDataFrame() - _check_roundtrip( - tdf, tm.assert_frame_equal, path=setup_path, compression=compression - ) - - with ensure_clean_store(setup_path) as store: - # not consolidated - df["foo"] = np.random.default_rng(2).standard_normal(len(df)) - store["df"] = df - recons = store["df"] - assert recons._mgr.is_consolidated() - - # empty - _check_roundtrip(df[:0], tm.assert_frame_equal, path=setup_path) - - -def test_empty_series_frame(setup_path): - s0 = Series(dtype=object) - s1 = Series(name="myseries", dtype=object) - df0 = DataFrame() - df1 = DataFrame(index=["a", "b", "c"]) - df2 = DataFrame(columns=["d", "e", "f"]) - - _check_roundtrip(s0, tm.assert_series_equal, path=setup_path) - _check_roundtrip(s1, tm.assert_series_equal, path=setup_path) - _check_roundtrip(df0, tm.assert_frame_equal, path=setup_path) - _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) - _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) - - -@pytest.mark.parametrize("dtype", [np.int64, np.float64, object, "m8[ns]", "M8[ns]"]) -def test_empty_series(dtype, setup_path): - s = Series(dtype=dtype) - _check_roundtrip(s, tm.assert_series_equal, path=setup_path) - - -def test_can_serialize_dates(setup_path): - rng = [x.date() for x in bdate_range("1/1/2000", "1/30/2000")] - frame = DataFrame( - np.random.default_rng(2).standard_normal((len(rng), 4)), index=rng - ) - - _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) - - -def test_store_hierarchical(setup_path, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - - _check_roundtrip(frame, tm.assert_frame_equal, path=setup_path) - _check_roundtrip(frame.T, tm.assert_frame_equal, path=setup_path) - _check_roundtrip(frame["A"], tm.assert_series_equal, path=setup_path) - - # check that the names are stored - with ensure_clean_store(setup_path) as store: - store["frame"] = frame - recons = store["frame"] - tm.assert_frame_equal(recons, frame) - - -@pytest.mark.parametrize( - "compression", [False, pytest.param(True, marks=td.skip_if_windows)] -) -def test_store_mixed(compression, setup_path): - def _make_one(): - df = tm.makeDataFrame() - df["obj1"] = "foo" - df["obj2"] = "bar" - df["bool1"] = df["A"] > 0 - df["bool2"] = df["B"] > 0 - df["int1"] = 1 - df["int2"] = 2 - return df._consolidate() - - df1 = _make_one() - df2 = _make_one() - - _check_roundtrip(df1, tm.assert_frame_equal, path=setup_path) - _check_roundtrip(df2, tm.assert_frame_equal, path=setup_path) - - with ensure_clean_store(setup_path) as store: - store["obj"] = df1 - tm.assert_frame_equal(store["obj"], df1) - store["obj"] = df2 - tm.assert_frame_equal(store["obj"], df2) - - # check that can store Series of all of these types - _check_roundtrip( - df1["obj1"], - tm.assert_series_equal, - path=setup_path, - compression=compression, - ) - _check_roundtrip( - df1["bool1"], - tm.assert_series_equal, - path=setup_path, - compression=compression, - ) - _check_roundtrip( - df1["int1"], - tm.assert_series_equal, - path=setup_path, - compression=compression, - ) - - -def _check_roundtrip(obj, comparator, path, compression=False, **kwargs): - options = {} - if compression: - options["complib"] = "blosc" 
- - with ensure_clean_store(path, "w", **options) as store: - store["obj"] = obj - retrieved = store["obj"] - comparator(retrieved, obj, **kwargs) - - -def _check_roundtrip_table(obj, comparator, path, compression=False): - options = {} - if compression: - options["complib"] = "blosc" - - with ensure_clean_store(path, "w", **options) as store: - store.put("obj", obj, format="table") - retrieved = store["obj"] - - comparator(retrieved, obj) - - -def test_unicode_index(setup_path): - unicode_values = ["\u03c3", "\u03c3\u03c3"] - - s = Series( - np.random.default_rng(2).standard_normal(len(unicode_values)), - unicode_values, - ) - _check_roundtrip(s, tm.assert_series_equal, path=setup_path) - - -def test_unicode_longer_encoded(setup_path): - # GH 11234 - char = "\u0394" - df = DataFrame({"A": [char]}) - with ensure_clean_store(setup_path) as store: - store.put("df", df, format="table", encoding="utf-8") - result = store.get("df") - tm.assert_frame_equal(result, df) - - df = DataFrame({"A": ["a", char], "B": ["b", "b"]}) - with ensure_clean_store(setup_path) as store: - store.put("df", df, format="table", encoding="utf-8") - result = store.get("df") - tm.assert_frame_equal(result, df) - - -def test_store_datetime_mixed(setup_path): - df = DataFrame({"a": [1, 2, 3], "b": [1.0, 2.0, 3.0], "c": ["a", "b", "c"]}) - ts = tm.makeTimeSeries() - df["d"] = ts.index[:3] - _check_roundtrip(df, tm.assert_frame_equal, path=setup_path) - - -def test_round_trip_equals(tmp_path, setup_path): - # GH 9330 - df = DataFrame({"B": [1, 2], "A": ["x", "y"]}) - - path = tmp_path / setup_path - df.to_hdf(path, "df", format="table") - other = read_hdf(path, "df") - tm.assert_frame_equal(df, other) - assert df.equals(other) - assert other.equals(df) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/tools.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/tools.py deleted file mode 100644 index 2b05d38ef22c15e67bc7413a0a4c059a64801164..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pydantic/deprecated/tools.py +++ /dev/null @@ -1,96 +0,0 @@ -from __future__ import annotations - -import json -import warnings -from typing import TYPE_CHECKING, Any, Callable, Type, TypeVar, Union - -from typing_extensions import deprecated - -from ..json_schema import DEFAULT_REF_TEMPLATE, GenerateJsonSchema -from ..type_adapter import TypeAdapter -from ..warnings import PydanticDeprecatedSince20 - -if not TYPE_CHECKING: - # See PyCharm issues https://youtrack.jetbrains.com/issue/PY-21915 - # and https://youtrack.jetbrains.com/issue/PY-51428 - DeprecationWarning = PydanticDeprecatedSince20 - -__all__ = 'parse_obj_as', 'schema_of', 'schema_json_of' - -NameFactory = Union[str, Callable[[Type[Any]], str]] - - -T = TypeVar('T') - - -@deprecated( - 'parse_obj_as is deprecated. Use pydantic.TypeAdapter.validate_python instead.', category=PydanticDeprecatedSince20 -) -def parse_obj_as(type_: type[T], obj: Any, type_name: NameFactory | None = None) -> T: - warnings.warn( - 'parse_obj_as is deprecated. Use pydantic.TypeAdapter.validate_python instead.', - DeprecationWarning, - stacklevel=2, - ) - if type_name is not None: # pragma: no cover - warnings.warn( - 'The type_name parameter is deprecated. parse_obj_as no longer creates temporary models', - DeprecationWarning, - stacklevel=2, - ) - return TypeAdapter(type_).validate_python(obj) - - -@deprecated( - 'schema_of is deprecated. 
Use pydantic.TypeAdapter.json_schema instead.', category=PydanticDeprecatedSince20 -) -def schema_of( - type_: Any, - *, - title: NameFactory | None = None, - by_alias: bool = True, - ref_template: str = DEFAULT_REF_TEMPLATE, - schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, -) -> dict[str, Any]: - """Generate a JSON schema (as dict) for the passed model or dynamically generated one.""" - warnings.warn( - 'schema_of is deprecated. Use pydantic.TypeAdapter.json_schema instead.', DeprecationWarning, stacklevel=2 - ) - res = TypeAdapter(type_).json_schema( - by_alias=by_alias, - schema_generator=schema_generator, - ref_template=ref_template, - ) - if title is not None: - if isinstance(title, str): - res['title'] = title - else: - warnings.warn( - 'Passing a callable for the `title` parameter is deprecated and no longer supported', - DeprecationWarning, - stacklevel=2, - ) - res['title'] = title(type_) - return res - - -@deprecated( - 'schema_json_of is deprecated. Use pydantic.TypeAdapter.json_schema instead.', category=PydanticDeprecatedSince20 -) -def schema_json_of( - type_: Any, - *, - title: NameFactory | None = None, - by_alias: bool = True, - ref_template: str = DEFAULT_REF_TEMPLATE, - schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema, - **dumps_kwargs: Any, -) -> str: - """Generate a JSON schema (as JSON) for the passed model or dynamically generated one.""" - warnings.warn( - 'schema_json_of is deprecated. Use pydantic.TypeAdapter.json_schema instead.', DeprecationWarning, stacklevel=2 - ) - return json.dumps( - schema_of(type_, title=title, by_alias=by_alias, ref_template=ref_template, schema_generator=schema_generator), - **dumps_kwargs, - ) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/install_headers.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/install_headers.py deleted file mode 100644 index 9bb0b18dc0d809dbc03d9ca355818b3bb0af573b..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/_distutils/command/install_headers.py +++ /dev/null @@ -1,47 +0,0 @@ -"""distutils.command.install_headers - -Implements the Distutils 'install_headers' command, to install C/C++ header -files to the Python include directory.""" - -from distutils.core import Command - - -# XXX force is never used -class install_headers(Command): - - description = "install C/C++ header files" - - user_options = [('install-dir=', 'd', - "directory to install header files to"), - ('force', 'f', - "force installation (overwrite existing files)"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.install_dir = None - self.force = 0 - self.outfiles = [] - - def finalize_options(self): - self.set_undefined_options('install', - ('install_headers', 'install_dir'), - ('force', 'force')) - - - def run(self): - headers = self.distribution.headers - if not headers: - return - - self.mkpath(self.install_dir) - for header in headers: - (out, _) = self.copy_file(header, self.install_dir) - self.outfiles.append(out) - - def get_inputs(self): - return self.distribution.headers or [] - - def get_outputs(self): - return self.outfiles diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/command/dist_info.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/command/dist_info.py deleted file mode 100644 index 
c45258fa03a3ddd6a73db4514365f8741d16ca86..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/setuptools/command/dist_info.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Create a dist_info directory -As defined in the wheel specification -""" - -import os - -from distutils.core import Command -from distutils import log - - -class dist_info(Command): - - description = 'create a .dist-info directory' - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)"), - ] - - def initialize_options(self): - self.egg_base = None - - def finalize_options(self): - pass - - def run(self): - egg_info = self.get_finalized_command('egg_info') - egg_info.egg_base = self.egg_base - egg_info.finalize_options() - egg_info.run() - dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' - log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) - - bdist_wheel = self.get_finalized_command('bdist_wheel') - bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/_tqdm.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/_tqdm.py deleted file mode 100644 index 7fc4962774a4651db7a739a3f143633b6215a9bd..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/tqdm/_tqdm.py +++ /dev/null @@ -1,9 +0,0 @@ -from warnings import warn - -from .std import * # NOQA -from .std import __all__ # NOQA -from .std import TqdmDeprecationWarning - -warn("This function will be removed in tqdm==5.0.0\n" - "Please use `tqdm.std.*` instead of `tqdm._tqdm.*`", - TqdmDeprecationWarning, stacklevel=2) diff --git a/spaces/qinzhu/diy-girlfriend-online/text/english.py b/spaces/qinzhu/diy-girlfriend-online/text/english.py deleted file mode 100644 index 6817392ba8a9eb830351de89fb7afc5ad72f5e42..0000000000000000000000000000000000000000 --- a/spaces/qinzhu/diy-girlfriend-online/text/english.py +++ /dev/null @@ -1,188 +0,0 @@ -""" from https://github.com/keithito/tacotron """ - -''' -Cleaners are transformations that run over the input text at both training and eval time. - -Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" -hyperparameter. Some cleaners are English-specific. You'll typically want to use: - 1. "english_cleaners" for English text - 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using - the Unidecode library (https://pypi.python.org/pypi/Unidecode) - 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update - the symbols in symbols.py to match your data). -''' - - -# Regular expression matching whitespace: - - -import re -import inflect -from unidecode import unidecode -import eng_to_ipa as ipa -_inflect = inflect.engine() -_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])') -_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)') -_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)') -_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)') -_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)') -_number_re = re.compile(r'[0-9]+') - -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' 
% x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - - -# List of (ipa, lazy ipa) pairs: -_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('æ', 'e'), - ('ɑ', 'a'), - ('ɔ', 'o'), - ('ð', 'z'), - ('θ', 's'), - ('ɛ', 'e'), - ('ɪ', 'i'), - ('ʊ', 'u'), - ('ʒ', 'ʥ'), - ('ʤ', 'ʥ'), - ('ˈ', '↓'), -]] - -# List of (ipa, lazy ipa2) pairs: -_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ð', 'z'), - ('θ', 's'), - ('ʒ', 'ʑ'), - ('ʤ', 'dʑ'), - ('ˈ', '↓'), -]] - -# List of (ipa, ipa2) pairs -_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('r', 'ɹ'), - ('ʤ', 'dʒ'), - ('ʧ', 'tʃ') -]] - - -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text - - -def collapse_whitespace(text): - return re.sub(r'\s+', ' ', text) - - -def _remove_commas(m): - return m.group(1).replace(',', '') - - -def _expand_decimal_point(m): - return m.group(1).replace('.', ' point ') - - -def _expand_dollars(m): - match = m.group(1) - parts = match.split('.') - if len(parts) > 2: - return match + ' dollars' # Unexpected format - dollars = int(parts[0]) if parts[0] else 0 - cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 - if dollars and cents: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit) - elif dollars: - dollar_unit = 'dollar' if dollars == 1 else 'dollars' - return '%s %s' % (dollars, dollar_unit) - elif cents: - cent_unit = 'cent' if cents == 1 else 'cents' - return '%s %s' % (cents, cent_unit) - else: - return 'zero dollars' - - -def _expand_ordinal(m): - return _inflect.number_to_words(m.group(0)) - - -def _expand_number(m): - num = int(m.group(0)) - if num > 1000 and num < 3000: - if num == 2000: - return 'two thousand' - elif num > 2000 and num < 2010: - return 'two thousand ' + _inflect.number_to_words(num % 100) - elif num % 100 == 0: - return _inflect.number_to_words(num // 100) + ' hundred' - else: - return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ') - else: - return _inflect.number_to_words(num, andword='') - - -def normalize_numbers(text): - text = re.sub(_comma_number_re, _remove_commas, text) - text = re.sub(_pounds_re, r'\1 pounds', text) - text = re.sub(_dollars_re, _expand_dollars, text) - text = re.sub(_decimal_number_re, _expand_decimal_point, text) - text = re.sub(_ordinal_re, _expand_ordinal, text) - text = re.sub(_number_re, _expand_number, text) - return text - - -def mark_dark_l(text): - return re.sub(r'l([^aeiouæɑɔəɛɪʊ ]*(?: |$))', lambda x: 'ɫ'+x.group(1), text) - - -def english_to_ipa(text): - text = unidecode(text).lower() - text = expand_abbreviations(text) - text = normalize_numbers(text) - phonemes = ipa.convert(text) - phonemes = collapse_whitespace(phonemes) - return phonemes - - -def english_to_lazy_ipa(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa: - text = re.sub(regex, replacement, text) - return text - - -def english_to_ipa2(text): - text = english_to_ipa(text) - text = 
mark_dark_l(text) - for regex, replacement in _ipa_to_ipa2: - text = re.sub(regex, replacement, text) - return text.replace('...', '…') - - -def english_to_lazy_ipa2(text): - text = english_to_ipa(text) - for regex, replacement in _lazy_ipa2: - text = re.sub(regex, replacement, text) - return text diff --git a/spaces/rachana219/MODT2/utils/google_utils.py b/spaces/rachana219/MODT2/utils/google_utils.py deleted file mode 100644 index f363408e63981702e63dcda189cbc2099d0a9499..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/utils/google_utils.py +++ /dev/null @@ -1,123 +0,0 @@ -# Google utils: https://cloud.google.com/storage/docs/reference/libraries - -import os -import platform -import subprocess -import time -from pathlib import Path - -import requests -import torch - - -def gsutil_getsize(url=''): - # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du - s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') - return eval(s.split(' ')[0]) if len(s) else 0 # bytes - - -def attempt_download(file, repo='WongKinYiu/yolov7'): - # Attempt file download if does not exist - file = Path(str(file).strip().replace("'", '').lower()) - - if not file.exists(): - try: - response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api - assets = [x['name'] for x in response['assets']] # release assets - tag = response['tag_name'] # i.e. 'v1.0' - except: # fallback plan - assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', - 'yolov7-e6e.pt', 'yolov7-w6.pt'] - tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] - - name = file.name - if name in assets: - msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' - redundant = False # second download option - try: # GitHub - url = f'https://github.com/{repo}/releases/download/{tag}/{name}' - print(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert file.exists() and file.stat().st_size > 1E6 # check - except Exception as e: # GCP - print(f'Download error: {e}') - assert redundant, 'No secondary mirror' - url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' - print(f'Downloading {url} to {file}...') - os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) - finally: - if not file.exists() or file.stat().st_size < 1E6: # check - file.unlink(missing_ok=True) # remove partial downloads - print(f'ERROR: Download failure: {msg}') - print('') - return - - -def gdrive_download(id='', file='tmp.zip'): - # Downloads a file from Google Drive. from yolov7.utils.google_utils import *; gdrive_download() - t = time.time() - file = Path(file) - cookie = Path('cookie') # gdrive cookie - print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... 
', end='') - file.unlink(missing_ok=True) # remove existing file - cookie.unlink(missing_ok=True) # remove existing cookie - - # Attempt file download - out = "NUL" if platform.system() == "Windows" else "/dev/null" - os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') - if os.path.exists('cookie'): # large file - s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' - else: # small file - s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' - r = os.system(s) # execute, capture return - cookie.unlink(missing_ok=True) # remove existing cookie - - # Error check - if r != 0: - file.unlink(missing_ok=True) # remove partial - print('Download error ') # raise Exception('Download error') - return r - - # Unzip if archive - if file.suffix == '.zip': - print('unzipping... ', end='') - os.system(f'unzip -q {file}') # unzip - file.unlink() # remove zip to free space - - print(f'Done ({time.time() - t:.1f}s)') - return r - - -def get_token(cookie="./cookie"): - with open(cookie) as f: - for line in f: - if "download" in line: - return line.split()[-1] - return "" - -# def upload_blob(bucket_name, source_file_name, destination_blob_name): -# # Uploads a file to a bucket -# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python -# -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(destination_blob_name) -# -# blob.upload_from_filename(source_file_name) -# -# print('File {} uploaded to {}.'.format( -# source_file_name, -# destination_blob_name)) -# -# -# def download_blob(bucket_name, source_blob_name, destination_file_name): -# # Uploads a blob from a bucket -# storage_client = storage.Client() -# bucket = storage_client.get_bucket(bucket_name) -# blob = bucket.blob(source_blob_name) -# -# blob.download_to_filename(destination_file_name) -# -# print('Blob {} downloaded to {}.'.format( -# source_blob_name, -# destination_file_name)) diff --git a/spaces/radames/MusicGen-Continuation/tests/models/test_musicgen.py b/spaces/radames/MusicGen-Continuation/tests/models/test_musicgen.py deleted file mode 100644 index 53eff4405ab7de18e0ae18df8c8f9959a1c9e031..0000000000000000000000000000000000000000 --- a/spaces/radames/MusicGen-Continuation/tests/models/test_musicgen.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import pytest -import torch - -from audiocraft.models import MusicGen - - -class TestSEANetModel: - def get_musicgen(self): - mg = MusicGen.get_pretrained(name='debug', device='cpu') - mg.set_generation_params(duration=2.0) - return mg - - def test_base(self): - mg = self.get_musicgen() - assert mg.frame_rate == 25 - assert mg.sample_rate == 32000 - assert mg.audio_channels == 1 - - def test_generate_unconditional(self): - mg = self.get_musicgen() - wav = mg.generate_unconditional(3) - assert list(wav.shape) == [3, 1, 64000] - - def test_generate_continuation(self): - mg = self.get_musicgen() - prompt = torch.randn(3, 1, 32000) - wav = mg.generate_continuation(prompt, 32000) - assert list(wav.shape) == [3, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] - - prompt = torch.randn(2, 1, 32000) - with pytest.raises(AssertionError): - wav = mg.generate_continuation( - prompt, 32000, ['youpi', 'lapin dort', 'one too many']) - - def test_generate(self): - mg = self.get_musicgen() - wav = mg.generate( - ['youpi', 'lapin dort']) - assert list(wav.shape) == [2, 1, 64000] diff --git a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/train_shape.py b/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/train_shape.py deleted file mode 100644 index 241ce543c956ce51f6f8445739ef41f4ddf7a7d5..0000000000000000000000000000000000000000 --- a/spaces/radames/PIFu-Clothed-Human-Digitization/PIFu/apps/train_shape.py +++ /dev/null @@ -1,183 +0,0 @@ -import sys -import os - -sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) -ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - -import time -import json -import numpy as np -import cv2 -import random -import torch -from torch.utils.data import DataLoader -from tqdm import tqdm - -from lib.options import BaseOptions -from lib.mesh_util import * -from lib.sample_util import * -from lib.train_util import * -from lib.data import * -from lib.model import * -from lib.geometry import index - -# get options -opt = BaseOptions().parse() - -def train(opt): - # set cuda - cuda = torch.device('cuda:%d' % opt.gpu_id) - - train_dataset = TrainDataset(opt, phase='train') - test_dataset = TrainDataset(opt, phase='test') - - projection_mode = train_dataset.projection_mode - - # create data loader - train_data_loader = DataLoader(train_dataset, - batch_size=opt.batch_size, shuffle=not opt.serial_batches, - num_workers=opt.num_threads, pin_memory=opt.pin_memory) - - print('train data size: ', len(train_data_loader)) - - # NOTE: batch size should be 1 and use all the points for evaluation - test_data_loader = DataLoader(test_dataset, - batch_size=1, shuffle=False, - num_workers=opt.num_threads, pin_memory=opt.pin_memory) - print('test data size: ', len(test_data_loader)) - - # create net - netG = HGPIFuNet(opt, projection_mode).to(device=cuda) - optimizerG = torch.optim.RMSprop(netG.parameters(), lr=opt.learning_rate, momentum=0, weight_decay=0) - lr = opt.learning_rate - print('Using Network: ', netG.name) - - def set_train(): - netG.train() - - def set_eval(): - netG.eval() - - # load checkpoints - if opt.load_netG_checkpoint_path is not None: - print('loading for net G ...', opt.load_netG_checkpoint_path) - netG.load_state_dict(torch.load(opt.load_netG_checkpoint_path, map_location=cuda)) - - if opt.continue_train: - if opt.resume_epoch < 0: - model_path = '%s/%s/netG_latest' % 
(opt.checkpoints_path, opt.name) - else: - model_path = '%s/%s/netG_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch) - print('Resuming from ', model_path) - netG.load_state_dict(torch.load(model_path, map_location=cuda)) - - os.makedirs(opt.checkpoints_path, exist_ok=True) - os.makedirs(opt.results_path, exist_ok=True) - os.makedirs('%s/%s' % (opt.checkpoints_path, opt.name), exist_ok=True) - os.makedirs('%s/%s' % (opt.results_path, opt.name), exist_ok=True) - - opt_log = os.path.join(opt.results_path, opt.name, 'opt.txt') - with open(opt_log, 'w') as outfile: - outfile.write(json.dumps(vars(opt), indent=2)) - - # training - start_epoch = 0 if not opt.continue_train else max(opt.resume_epoch,0) - for epoch in range(start_epoch, opt.num_epoch): - epoch_start_time = time.time() - - set_train() - iter_data_time = time.time() - for train_idx, train_data in enumerate(train_data_loader): - iter_start_time = time.time() - - # retrieve the data - image_tensor = train_data['img'].to(device=cuda) - calib_tensor = train_data['calib'].to(device=cuda) - sample_tensor = train_data['samples'].to(device=cuda) - - image_tensor, calib_tensor = reshape_multiview_tensors(image_tensor, calib_tensor) - - if opt.num_views > 1: - sample_tensor = reshape_sample_tensor(sample_tensor, opt.num_views) - - label_tensor = train_data['labels'].to(device=cuda) - - res, error = netG.forward(image_tensor, sample_tensor, calib_tensor, labels=label_tensor) - - optimizerG.zero_grad() - error.backward() - optimizerG.step() - - iter_net_time = time.time() - eta = ((iter_net_time - epoch_start_time) / (train_idx + 1)) * len(train_data_loader) - ( - iter_net_time - epoch_start_time) - - if train_idx % opt.freq_plot == 0: - print( - 'Name: {0} | Epoch: {1} | {2}/{3} | Err: {4:.06f} | LR: {5:.06f} | Sigma: {6:.02f} | dataT: {7:.05f} | netT: {8:.05f} | ETA: {9:02d}:{10:02d}'.format( - opt.name, epoch, train_idx, len(train_data_loader), error.item(), lr, opt.sigma, - iter_start_time - iter_data_time, - iter_net_time - iter_start_time, int(eta // 60), - int(eta - 60 * (eta // 60)))) - - if train_idx % opt.freq_save == 0 and train_idx != 0: - torch.save(netG.state_dict(), '%s/%s/netG_latest' % (opt.checkpoints_path, opt.name)) - torch.save(netG.state_dict(), '%s/%s/netG_epoch_%d' % (opt.checkpoints_path, opt.name, epoch)) - - if train_idx % opt.freq_save_ply == 0: - save_path = '%s/%s/pred.ply' % (opt.results_path, opt.name) - r = res[0].cpu() - points = sample_tensor[0].transpose(0, 1).cpu() - save_samples_truncted_prob(save_path, points.detach().numpy(), r.detach().numpy()) - - iter_data_time = time.time() - - # update learning rate - lr = adjust_learning_rate(optimizerG, epoch, lr, opt.schedule, opt.gamma) - - #### test - with torch.no_grad(): - set_eval() - - if not opt.no_num_eval: - test_losses = {} - print('calc error (test) ...') - test_errors = calc_error(opt, netG, cuda, test_dataset, 100) - print('eval test MSE: {0:06f} IOU: {1:06f} prec: {2:06f} recall: {3:06f}'.format(*test_errors)) - MSE, IOU, prec, recall = test_errors - test_losses['MSE(test)'] = MSE - test_losses['IOU(test)'] = IOU - test_losses['prec(test)'] = prec - test_losses['recall(test)'] = recall - - print('calc error (train) ...') - train_dataset.is_train = False - train_errors = calc_error(opt, netG, cuda, train_dataset, 100) - train_dataset.is_train = True - print('eval train MSE: {0:06f} IOU: {1:06f} prec: {2:06f} recall: {3:06f}'.format(*train_errors)) - MSE, IOU, prec, recall = train_errors - test_losses['MSE(train)'] = MSE - 
test_losses['IOU(train)'] = IOU - test_losses['prec(train)'] = prec - test_losses['recall(train)'] = recall - - if not opt.no_gen_mesh: - print('generate mesh (test) ...') - for gen_idx in tqdm(range(opt.num_gen_mesh_test)): - test_data = random.choice(test_dataset) - save_path = '%s/%s/test_eval_epoch%d_%s.obj' % ( - opt.results_path, opt.name, epoch, test_data['name']) - gen_mesh(opt, netG, cuda, test_data, save_path) - - print('generate mesh (train) ...') - train_dataset.is_train = False - for gen_idx in tqdm(range(opt.num_gen_mesh_test)): - train_data = random.choice(train_dataset) - save_path = '%s/%s/train_eval_epoch%d_%s.obj' % ( - opt.results_path, opt.name, epoch, train_data['name']) - gen_mesh(opt, netG, cuda, train_data, save_path) - train_dataset.is_train = True - - -if __name__ == '__main__': - train(opt) \ No newline at end of file diff --git a/spaces/radames/UserControllableLT-Latent-Transformer/expansion/dataloader/kitti15list_train_lidar.py b/spaces/radames/UserControllableLT-Latent-Transformer/expansion/dataloader/kitti15list_train_lidar.py deleted file mode 100644 index aa77c139455c195d0532aa890c5c8b8f137923cb..0000000000000000000000000000000000000000 --- a/spaces/radames/UserControllableLT-Latent-Transformer/expansion/dataloader/kitti15list_train_lidar.py +++ /dev/null @@ -1,34 +0,0 @@ -import torch.utils.data as data - -from PIL import Image -import os -import os.path -import numpy as np - -IMG_EXTENSIONS = [ - '.jpg', '.JPG', '.jpeg', '.JPEG', - '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', -] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - -def dataloader(filepath): - - left_fold = 'image_2/' - flow_noc = 'flow_occ/' - - train = [img for img in os.listdir(filepath+left_fold) if img.find('_10') > -1] - -# train = [i for i in train if int(i.split('_')[0])%5!=0] - with open('/data/gengshay/kitti_scene/devkit/mapping/train_mapping.txt','r') as f: - flags = [True if len(i)>1 else False for i in f.readlines()] - train = [fn for (it,fn) in enumerate(sorted(train)) if flags[it] ][:100] - - l0_train = [filepath+left_fold+img for img in train] - l1_train = [filepath+left_fold+img.replace('_10','_11') for img in train] - flow_train = [filepath+flow_noc+img for img in train] - - - return sorted(l0_train), sorted(l1_train), sorted(flow_train) diff --git a/spaces/radames/hello-pytesseract/README.md b/spaces/radames/hello-pytesseract/README.md deleted file mode 100644 index 6c03aebc5353d2370b6d3d37108288562b1af060..0000000000000000000000000000000000000000 --- a/spaces/radames/hello-pytesseract/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Hello Pytesseract -emoji: ⚡ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/radames/transformers-js-sveltekit-server-example-app/src/lib/server/pipeline.js b/spaces/radames/transformers-js-sveltekit-server-example-app/src/lib/server/pipeline.js deleted file mode 100644 index f69f4b6b060d4b47218e2b6c5f4d166804c22fb9..0000000000000000000000000000000000000000 --- a/spaces/radames/transformers-js-sveltekit-server-example-app/src/lib/server/pipeline.js +++ /dev/null @@ -1,31 +0,0 @@ -import { pipeline } from '@xenova/transformers'; - -// Use the Singleton pattern to enable lazy construction of the pipeline. -// NOTE: We wrap the class in a function to prevent code duplication (see below). 
-const P = () => - class PipelineSingleton { - static task = 'text-classification'; - static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english'; - static instance = null; - - static async getInstance(progress_callback = null) { - if (this.instance === null) { - this.instance = pipeline(this.task, this.model, { progress_callback }); - } - return this.instance; - } - }; - -let PipelineSingleton; -if (process.env.NODE_ENV !== 'production') { - // When running in development mode, attach the pipeline to the - // global object so that it's preserved between hot reloads. - // For more information, see https://vercel.com/guides/nextjs-prisma-postgres - if (!global.PipelineSingleton) { - global.PipelineSingleton = P(); - } - PipelineSingleton = global.PipelineSingleton; -} else { - PipelineSingleton = P(); -} -export default PipelineSingleton; diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Animal Crossing Mac Download Free !FULL!.md b/spaces/raedeXanto/academic-chatgpt-beta/Animal Crossing Mac Download Free !FULL!.md deleted file mode 100644 index 4abb589f030f9734f89361881d42b724e57c8697..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Animal Crossing Mac Download Free !FULL!.md +++ /dev/null @@ -1,19 +0,0 @@ -
        -

        How to Play Animal Crossing on Your Mac for Free

        -

        If you are a fan of the popular Animal Crossing series, you might be wondering how to play it on your Mac computer. Unfortunately, there is no official version of Animal Crossing for Mac, but there are some ways to enjoy this charming game for free on your device. Here are two options you can try:

        -
          -
        1. Animal Crossing: Pocket Camp. This is a mobile game that you can download from the App Store and play on your iPhone or iPad. It is a spin-off of the main series that lets you design your own campsite, interact with cute animals, and collect items. You can also link your Nintendo account and get some rewards for other Animal Crossing games. To play this game on your Mac, you will need to use an iOS emulator, such as iPadian or MobiOne Studios. These are programs that simulate the iOS environment on your computer and allow you to run iOS apps. However, keep in mind that these emulators are not officially supported by Apple or Nintendo, and they may not work perfectly or have some security risks. Use them at your own discretion and follow the instructions on their websites to install and use them.[^1^]
        2. Animal Crossing: New Horizons. This is the latest installment of the main series that was released for the Nintendo Switch in 2020. It is a highly acclaimed game that lets you create your own island paradise, customize your character and home, explore different seasons and events, and make friends with adorable villagers. To play this game on your Mac, you will need to use a Nintendo Switch emulator, such as Yuzu or Ryujinx. These are programs that mimic the Switch hardware and software on your computer and allow you to run Switch games. However, keep in mind that these emulators are not officially supported by Nintendo or the game developers, and they may not work properly or have some legal issues. Use them at your own discretion and follow the instructions on their websites to install and use them. You will also need a copy of the game file, which you can either dump from your own Switch console or download from the internet (which may be illegal in some regions).[^2^]
        -

        As you can see, there are some ways to play Animal Crossing on your Mac for free, but they are not very easy or reliable. If you really want to enjoy the full experience of this game, you might want to consider getting a Nintendo Switch console and buying the game legally. It might cost you some money, but it will be worth it for the hours of fun and relaxation you will get from this game.

        -

        Animal Crossing Mac Download Free


        Download Zip · https://tinourl.com/2uL4rv



        - -

        If you are wondering what makes Animal Crossing so special and popular, here are some of the reasons why millions of players love this game:

        -
          -
        • It is relaxing and stress-free. Unlike many other games that have goals, challenges, or enemies to overcome, Animal Crossing is a game that lets you play at your own pace and do whatever you want. There is no pressure or competition, only fun and creativity. You can spend your time fishing, gardening, decorating, shopping, or just chatting with your animal friends. The game also has a soothing soundtrack and a beautiful art style that create a peaceful and cozy atmosphere.
        • It is immersive and realistic. Animal Crossing is a game that simulates real life in many ways. The game follows the real-time clock and calendar, so you can experience different seasons, weather, holidays, and events throughout the year. The game also has a dynamic day and night cycle, so you can see the sun rise and set, the stars shine, and the moon change phases. The game also has a lot of details and features that make it feel alive, such as the changing expressions and personalities of the animals, the random visitors and surprises that show up on your island, and the ability to interact with almost anything in the game world.
        • It is social and personal. Animal Crossing is a game that lets you express yourself and connect with others. You can customize your character's appearance, clothes, accessories, and voice. You can also design your own patterns, flags, furniture, and clothing. You can share your creations with other players online or visit their islands and see what they have done. You can also send letters, gifts, or messages to your friends or invite them to play with you in real time. The game also has a lot of options and choices that let you shape your own island and story.
        -

        Animal Crossing is a game that has something for everyone. Whether you are looking for a relaxing escape from reality, a creative outlet for your imagination, or a way to bond with your friends and family, you will find it in this game. Animal Crossing is more than just a game; it is a lifestyle.

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Case Interview Secrets Victor Cheng PDF 17 A Former McKinsey Interviewers Guide to Getting Multiple Job Offers.md b/spaces/raedeXanto/academic-chatgpt-beta/Case Interview Secrets Victor Cheng PDF 17 A Former McKinsey Interviewers Guide to Getting Multiple Job Offers.md deleted file mode 100644 index 4faab8faaf6e7248e4299a6ab8e02d21d11a2a29..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Case Interview Secrets Victor Cheng PDF 17 A Former McKinsey Interviewers Guide to Getting Multiple Job Offers.md +++ /dev/null @@ -1,95 +0,0 @@ -
        -

        Case Interview Secrets: How to Ace Your Consulting Job Interview

        -

        If you are applying for a job in management consulting, chances are you will have to face a case interview. A case interview is a type of job interview used by consulting firms to assess your problem-solving skills and fit for the role. In a case interview, you will be given a business problem or scenario and asked to analyze it and provide recommendations. You will have to use your logic, creativity, and business knowledge to structure your approach, perform calculations, and communicate your findings.

        -

        case interview secrets victor cheng pdf 17


        Download ★★★ https://tinourl.com/2uL2gw



        -

        A case interview can be challenging and intimidating, especially if you are not familiar with the format or the expectations. However, it can also be a great opportunity to showcase your abilities and learn from the interviewer. The interviewer is not only evaluating your skills, but also coaching you along the way. They want to see how you think, how you handle ambiguity, and how you interact with them.

        -

        So how can you prepare for a case interview and ace it? One of the best ways is to learn from someone who has been there and done that. Someone who knows what consulting firms are looking for and how to impress them. Someone like Victor Cheng.

        -

        Who is Victor Cheng and what are his case interview secrets?

        -

        Victor Cheng is a former McKinsey consultant and interviewer who has helped thousands of candidates land offers from top consulting firms such as McKinsey, Bain, BCG, Deloitte, Accenture, and more. He is also the author of a bestselling book called Case Interview Secrets: A Former McKinsey Interviewer Reveals How to Get Multiple Job Offers in Consulting.

        -

        In this book, Victor Cheng reveals his proven, insider's method for acing the case interview. He shares his insights and tips on how to master every aspect of the case interview process. He explains how to prepare for a case interview, what to expect during a case interview, how to structure your approach and communicate your logic, how to solve different types of cases and common pitfalls to avoid, and how to impress the interviewer and demonstrate your fit for the firm.

        -

        The book covers the following topics:

        -


        -
          -
        • How to prepare for a case interview and what to expect
        • How to structure your approach and communicate your logic
        • How to solve different types of cases and common pitfalls to avoid
        • How to impress the interviewer and demonstrate your fit for the firm
        -

        The book is based on Victor Cheng's own experience as a McKinsey consultant and interviewer, as well as his coaching of thousands of candidates who have successfully passed their case interviews. The book is full of real-life examples, illustrations, frameworks, exercises, solutions, and feedback. The book is designed to help you develop the skills and mindset that are essential for succeeding in consulting.

        -

        How can you get access to Victor Cheng's case interview secrets?

        -

        If you are interested in reading Case Interview Secrets by Victor Cheng, you have several options. You can download a free PDF or EPUB version of the book from various online sources. You can also buy a paperback or Kindle version of the book from Amazon or other online retailers.

        -

        However, if you want more than just reading the book, you can also visit Victor Cheng's website www.caseinterviewsecrets.com and sign up for his free resources. These resources include:

        -

        case interview secrets victor cheng ebook download
        -case interview secrets victor cheng free pdf
        -case interview secrets victor cheng epub
        -case interview secrets victor cheng audiobook
        -case interview secrets victor cheng review
        -case interview secrets victor cheng summary
        -case interview secrets victor cheng amazon
        -case interview secrets victor cheng reddit
        -case interview secrets victor cheng online course
        -case interview secrets victor cheng book pdf
        -case interview secrets victor cheng pdf 17 edition
        -case interview secrets victor cheng pdf 17 chapter 1
        -case interview secrets victor cheng pdf 17 chapter 2
        -case interview secrets victor cheng pdf 17 chapter 3
        -case interview secrets victor cheng pdf 17 chapter 4
        -case interview secrets victor cheng pdf 17 chapter 5
        -case interview secrets victor cheng pdf 17 chapter 6
        -case interview secrets victor cheng pdf 17 chapter 7
        -case interview secrets victor cheng pdf 17 chapter 8
        -case interview secrets victor cheng pdf 17 chapter 9
        -case interview secrets victor cheng pdf 17 chapter 10
        -case interview secrets victor cheng pdf 17 solutions
        -case interview secrets victor cheng pdf 17 examples
        -case interview secrets victor cheng pdf 17 tips
        -case interview secrets victor cheng pdf 17 framework
        -case interview secrets victor cheng pdf 17 questions
        -case interview secrets victor cheng pdf 17 answers
        -case interview secrets victor cheng pdf 17 practice cases
        -case interview secrets victor cheng pdf 17 video guide
        -case interview secrets victor cheng pdf 17 bonus material
        -how to ace the case interview with victor cheng pdf 17
        -how to prepare for the case interview with victor cheng pdf 17
        -how to master the case interview with victor cheng pdf 17
        -how to crack the case interview with victor cheng pdf 17
        -how to pass the case interview with victor cheng pdf 17
        -how to succeed in the case interview with victor cheng pdf 17
        -how to nail the case interview with victor cheng pdf 17
        -how to win the case interview with victor cheng pdf 17
        -how to impress the interviewer with victor cheng pdf 17
        -how to get an offer with victor cheng pdf 17
        -what is the best way to study for the case interview with victor cheng pdf 17
        -what are the most common mistakes in the case interview with victor cheng pdf 17
        -what are the most important skills in the case interview with victor cheng pdf 17
        -what are the most challenging cases in the case interview with victor cheng pdf 17
        -what are the most frequently asked questions in the case interview with victor cheng pdf 17
        -where can I find more resources for the case interview with victor cheng pdf 17
        -who is victor cheng and why should I trust him for the case interview pdf 17
        -how did victor cheng get into consulting and what are his credentials for the case interview pdf 17
        -how does victor cheng teach the case interview and what is his methodology for the pdf 17
        -how much does it cost to buy the book or enroll in the course of case interview secrets by victor cheng pdf 17

        -

        These resources include:

        -
          -
        • PDF downloads of frameworks and slides from his Case Interview Secrets video program
        • E-newsletter with tips and advice on how to best practice and prepare for the case interview
        • Math practice tool to brush up on your case interview math skills
        • Case interview examples and solutions from real candidates
        -

        By signing up for these resources, you will also get access to Victor Cheng's exclusive email list where he shares more valuable information on how to ace your consulting job interview. You will also get notified of any new updates or offers that he may have.

        -

        Conclusion: Why you should read Case Interview Secrets by Victor Cheng

        -

        Case Interview Secrets by Victor Cheng is a comprehensive and practical guide on how to ace your consulting job interview. By reading this book, you will learn how to think like a consultant, solve any case problem, and impress any interviewer. You will also get access to additional resources that will help you practice and improve your skills.

        -

        Whether you are an undergraduate, MBA, PhD, or experienced hire candidate applying for consulting jobs, this book will help you get multiple job offers from your dream consulting firms. This book will give you an edge over other candidates who may not have the same level of preparation or guidance.

        -

        If you want to land a lucrative and rewarding career in management consulting, don't miss this opportunity. Read Case Interview Secrets by Victor Cheng today!

        - **FAQs**
          -
        1. What is Case Interview Secrets by Victor Cheng?
        Case Interview Secrets by Victor Cheng is a bestselling book that reveals how to ace your consulting job interview. It is written by a former McKinsey consultant and interviewer who has helped thousands of candidates land offers from top consulting firms.
        2. What does Case Interview Secrets by Victor Cheng cover?
        The book covers how to prepare for a case interview and what to expect, how to structure your approach and communicate your logic, how to solve different types of cases while avoiding common pitfalls, and how to impress the interviewer and demonstrate your fit for the firm.
        3. How can I get access to Case Interview Secrets by Victor Cheng?
        You can get access to Case Interview Secrets by Victor Cheng by downloading a free PDF or EPUB version of the book from various online sources. You can also buy a paperback or Kindle version of the book from Amazon or other online retailers. You can also visit Victor Cheng's website www.caseinterviewsecrets.com and sign up for his free resources.
        4. Why should I read Case Interview Secrets by Victor Cheng?
        You should read Case Interview Secrets by Victor Cheng because it is a comprehensive and practical guide on how to ace your consulting job interview. By reading this book, you will learn how to think like a consultant, solve any case problem, and impress any interviewer. You will also get access to additional resources that will help you practice and improve your skills. Whether you are an undergraduate, MBA, PhD, or experienced hire candidate applying for consulting jobs, this book will help you get multiple job offers from your dream consulting firms.
        -

        I hope you enjoyed reading this article and found it useful. If you have any questions or feedback, please feel free to contact me. I'm always happy to hear from you.

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Download FIFA 12 Config.exe Crack 25 8 The Ultimate Guide to Playing FIFA 12 for Free.md b/spaces/raedeXanto/academic-chatgpt-beta/Download FIFA 12 Config.exe Crack 25 8 The Ultimate Guide to Playing FIFA 12 for Free.md deleted file mode 100644 index 429107c6fca3545d9e1e8aab20ef1c25710980ab..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Download FIFA 12 Config.exe Crack 25 8 The Ultimate Guide to Playing FIFA 12 for Free.md +++ /dev/null @@ -1,113 +0,0 @@ -
        -

        Download FIFA 12 Config.exe Crack 25 8

        -

        Are you a fan of FIFA 12, the popular soccer simulation game from EA Sports? If you are, then you might have encountered some problems with the game, such as errors, crashes, or missing features. Or maybe you just want to play the game without any limitations or restrictions. In that case, you might be interested in downloading FIFA 12 Config.exe Crack 25 8, a file that can help you fix and enhance your gaming experience. But what is this file, and how can you get it and use it safely? In this article, we will answer these questions and more. Read on to find out everything you need to know about FIFA 12 Config.exe Crack 25 8.

        -

        What is FIFA 12 Config.exe Crack 25 8?

        -

        FIFA 12 Config.exe Crack 25 8 is a combination of two files that can modify your FIFA 12 game. Let's break it down into two parts:

        -

        Download fifa 12 config.exe crack 25 8


        DOWNLOAD === https://tinourl.com/2uL2XT



        -

        FIFA 12 Config.exe

        -

        FIFA 12 Config.exe is a configuration tool that allows you to adjust various settings of the game, such as graphics, sound, controller, and gameplay. You can use it to optimize your game for your system and preferences. For example, you can change the resolution, quality, anti-aliasing, and frame rate of the game. You can also enable or disable features such as commentary, crowd noise, music, and online mode. You can access this tool from the game folder or from the start menu.

        -

        Crack 25 8

        -

        Crack 25 8 is a patch that can bypass the game's security and activation system. This means that you can play the game without having to insert the original disc or enter a valid serial key. You can also access all the features and modes of the game that are normally locked or restricted. For example, you can play online with other players, use custom teams and players, and edit the database of the game. You can also update the game with official or unofficial patches and mods.

        -

        Why do you need FIFA 12 Config.exe Crack 25 8?

        -

        You might be wondering why you would need FIFA 12 Config.exe Crack 25 8. There are several reasons why you might want to use this file, such as:

        -

        To fix FIFA 12 errors and crashes

        -

        Some players have reported that FIFA 12 has some technical issues that prevent them from playing the game smoothly. For example, some players have experienced errors such as "FIFA 12 has stopped working", "FIFA 12 is not responding", or "FIFA 12 has encountered a problem and needs to close". Some players have also faced crashes or freezes during the game, especially when loading or saving. These issues can be caused by various factors, such as incompatible hardware, outdated drivers, corrupted files, or insufficient memory. By using FIFA 12 Config.exe Crack 25 8, you can fix these issues by adjusting the game settings to suit your system and avoid conflicts.

        -

        To unlock all features and modes

        -

        Another reason why you might want to use FIFA 12 Config.exe Crack 25 8 is to unlock all the features and modes that are normally locked or restricted in the game. For example, some features and modes are only available if you have the original disc or a valid serial key. Some features and modes are also region-locked or platform-locked. By using FIFA 12 Config.exe Crack 25 8, you can bypass these limitations and enjoy the game to the fullest. For example, you can play online with other players from different regions or platforms, use custom teams and players that are not included in the game, and edit the database of the game to change the attributes, ratings, and appearances of the players.

        -


        -

        To enjoy the game without any restrictions

        -

        The final reason why you might want to use FIFA 12 Config.exe Crack 25 8 is to enjoy the game without any restrictions or hassles. For example, some players might find it annoying or inconvenient to insert the original disc every time they want to play the game or to enter a valid serial key every time they want to activate the game. Some players might also want to update the game with official or unofficial patches and mods that can improve the game's graphics, gameplay, or content. By using FIFA 12 Config.exe Crack 25 8, you can avoid these problems and play the game as you wish. You can also backup your game files and save your progress without any worries.

        -

        How to download FIFA 12 Config.exe Crack 25 8?

        -

        Now that you know what FIFA 12 Config.exe Crack 25 8 is and why you might need it, you might be wondering how to get it and use it safely. Here are the steps that you need to follow:

        -

        Step 1: Find a reliable source

        -

The first step is to find a reliable source where you can download FIFA 12 Config.exe Crack 25 8. There are many websites that offer this file for free, but not all of them are trustworthy or safe. Some of them might contain viruses or malware that can harm your system or steal your personal information. Some might also provide fake or outdated files that can damage your game or cause more problems. Therefore, you need to be careful and do some research before downloading anything from the internet. You can check the reviews, ratings, comments, and feedback of other users who have downloaded the file from the same source. You can also scan the file with an antivirus program before opening it.
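As an extra check on top of an antivirus scan, you can compare the downloaded file's checksum against one published by the source, if the source provides one. The following is only a minimal Python sketch of that idea; the file name and expected hash are placeholders, not real values from any particular website.

```python
import hashlib

def sha256_of(path, chunk_size=8192):
    """Return the SHA-256 hex digest of a file, read in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder file name and hash for illustration only.
downloaded_file = "example_download.zip"
expected_hash = "replace-with-the-hash-published-by-the-download-site"

actual_hash = sha256_of(downloaded_file)
print("SHA-256:", actual_hash)
print("Matches published hash:", actual_hash == expected_hash)
```

If the computed hash does not match the one published by the source, the file has been altered or corrupted and should not be opened.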

        -

        Step 2: Download the file

The second step is to download FIFA 12 Config.exe Crack 25 8 from the source that you have chosen. The file size might vary depending on the source, but it should not be too large or too small; usually it is around 10 MB to 20 MB. The file format might also vary depending on the source, but it should be either a ZIP or a RAR file. The file name might also vary, but it should contain the words "FIFA 12 Config.exe Crack 25 8" or something similar. You can save the file to your desktop or any other location that you prefer.

        -

        Step 3: Extract the file

        -

        The third step is to extract FIFA 12 Config.exe Crack 25 8 from the ZIP or RAR file that you have downloaded. You can use any program that can open and extract ZIP or RAR files, such as WinRAR, 7-Zip, or WinZip. You can right-click on the file and choose "Extract here" or "Extract to" and select a destination folder. You should see two files in the folder: FIFA 12 Config.exe and Crack 25 8.exe.

        -

        Step 4: Copy and paste the file

        -

        The fourth step is to copy and paste FIFA 12 Config.exe Crack 25 8 to your FIFA 12 game folder. You can find your FIFA 12 game folder in your Program Files or Program Files (x86) folder, depending on your system. The game folder should be named "FIFA 12" or something similar. You can open the game folder and look for a file named "FIFA12.exe". This is the original executable file of the game. You need to replace this file with FIFA 12 Config.exe Crack 25 8. You can do this by copying FIFA 12 Config.exe and Crack 25 8.exe from the destination folder and pasting them to the game folder. You might need to confirm or allow this action if you are asked by your system. You should see a message that says "Copy and replace" or "Move and replace". You can click on "Yes" or "OK" to proceed.

        -

        How to use FIFA 12 Config.exe Crack 25 8?

Now that you have downloaded and installed FIFA 12 Config.exe Crack 25 8, you can use it to enjoy your FIFA 12 game. Here are the steps that you need to follow:

        -

        Step 1: Run the file as administrator

        -

        The first step is to run FIFA 12 Config.exe Crack 25 8 as administrator. You can do this by right-clicking on FIFA 12 Config.exe or Crack 25 8.exe and choosing "Run as administrator". You might need to enter your password or confirm this action if you are asked by your system. You should see a window that says "FIFA 12 Config" or "Crack 25 8".

        -

        Step 2: Follow the instructions

        -

        The second step is to follow the instructions that are displayed on the window. For FIFA 12 Config.exe, you can adjust the game settings as you wish. You can use the tabs and sliders to change the graphics, sound, controller, and gameplay options. You can also use the buttons to enable or disable features such as commentary, crowd noise, music, and online mode. You can click on "Apply" or "Save" to confirm your changes. For Crack 25 8.exe, you can simply click on "Install" or "Patch" to activate the crack. You should see a message that says "Done" or "Success".

        -

        Step 3: Launch the game and enjoy

        -

        The final step is to launch FIFA 12 and enjoy your game. You can do this by double-clicking on FIFA 12 Config.exe or Crack 25 8.exe. You should see the game's logo and intro video. You can skip them by pressing any key or clicking anywhere. You should then see the game's main menu and options. You can choose any mode or feature that you want to play. You should notice that all the modes and features are unlocked and available. You should also notice that the game runs smoothly and without any errors or crashes.

        -

        What are the risks of using FIFA 12 Config.exe Crack 25 8?

While FIFA 12 Config.exe Crack 25 8 can help you fix and enhance your FIFA 12 game, it also comes with some risks that you need to be aware of. Here are some of the risks that you might face by using this file:

        -

        Legal issues

        -

        One of the risks of using FIFA 12 Config.exe Crack 25 8 is that you might face legal issues. This is because FIFA 12 is a copyrighted product of EA Sports, and using a crack to bypass its security and activation system is considered illegal and unethical. You might be violating the terms and conditions of the game's license agreement, and you might be infringing the intellectual property rights of EA Sports. You might also be breaking the laws of your country or region regarding software piracy and copyright infringement. You might face legal actions such as fines, lawsuits, or even jail time if you are caught or reported by EA Sports or any other authority.

        -

        Virus and malware infection

        -

        Another risk of using FIFA 12 Config.exe Crack 25 8 is that you might get infected by viruses or malware. This is because some sources that offer this file for free might not be trustworthy or safe. They might attach viruses or malware to the file that can harm your system or steal your personal information. These viruses or malware might be hidden or disguised as part of the file, and they might not be detected by your antivirus program. They might also be activated when you run the file as administrator, giving them full access to your system. They might damage your files, corrupt your registry, slow down your performance, monitor your activities, or send your data to hackers or cybercriminals.

        -

        Game performance and stability issues

        -

        The final risk of using FIFA 12 Config.exe Crack 25 8 is that you might experience game performance and stability issues. This is because FIFA 12 Config.exe Crack 25 8 might not be compatible with your system or with the latest version of the game. It might also conflict with other files or programs that are running on your system. It might also contain bugs or errors that can cause problems for your game. You might experience issues such as lagging, stuttering, freezing, crashing, or missing features. You might also lose your progress, save files, or achievements. You might also have trouble updating the game with official or unofficial patches and mods.

        -

        Conclusion

FIFA 12 Config.exe Crack 25 8 is a file that can help you fix and enhance your FIFA 12 game. It is a combination of two files that can modify your game settings and bypass the game's security and activation system. It can help you fix FIFA 12 errors and crashes, unlock all features and modes, and enjoy the game without any restrictions. However, it also comes with some risks that you need to be aware of: it might cause legal issues, virus and malware infection, and game performance and stability issues. Therefore, you need to be careful and responsible when using this file. You need to find a reliable source, download the file safely, extract it correctly, copy and paste it properly, run it as administrator, follow the instructions carefully, and then launch the game and enjoy. We hope that this article has helped you understand everything you need to know about FIFA 12 Config.exe Crack 25 8.

        -

        FAQs

        -

        Here are some frequently asked questions about FIFA 12 Config.exe Crack 25 8:

| Question | Answer |
| --- | --- |
| Is FIFA 12 Config.exe Crack 25 8 free? | Yes, FIFA 12 Config.exe Crack 25 8 is free to download and use. However, it is not legal or ethical to use it. |
| Where can I download FIFA 12 Config.exe Crack 25 8? | You can download it from various websites that offer it for free. However, not all of them are trustworthy or safe. You need to do some research and check the reviews, ratings, comments, and feedback of other users before downloading anything from the internet. |
| How can I update FIFA 12 with FIFA 12 Config.exe Crack 25 8? | You can update FIFA 12 with official or unofficial patches and mods that can improve the game's graphics, gameplay, or content. However, you might face some compatibility or stability issues with FIFA 12 Config.exe Crack 25 8, and you might lose your crack or activation if you update the game. |
| How can I uninstall FIFA 12 Config.exe Crack 25 8? | You can uninstall it by deleting the files that you copied and pasted to your game folder. You can also restore your original game files by using a backup or a repair tool. However, you might lose your progress, save files, or achievements if you uninstall it. |
| Is there an alternative to FIFA 12 Config.exe Crack 25 8? | Yes, there are some alternatives that can also help you fix and enhance your FIFA 12 game. For example, you can use a FIFA 12 trainer, a FIFA 12 keygen, a FIFA 12 serial number, or a FIFA 12 activation code. However, these alternatives might also have some risks and drawbacks similar to FIFA 12 Config.exe Crack 25 8. |
        -

        -
        -
        \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Easy2Convert RAW to JPG Pro Crack Why You Need This Software.md b/spaces/raedeXanto/academic-chatgpt-beta/Easy2Convert RAW to JPG Pro Crack Why You Need This Software.md deleted file mode 100644 index 6051d7c70dd842344810c31e4633640d80ec4788..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Easy2Convert RAW to JPG Pro Crack Why You Need This Software.md +++ /dev/null @@ -1,256 +0,0 @@ -
        -
- Overview of Grand Chase and its gameplay
- Benefits of using Love Engine 0.5 for Grand Chase

H2: How to download and install Love Engine 0.5 for Grand Chase?
- Step-by-step guide on how to download Love Engine 0.5 from a reliable source
- Instructions on how to install Love Engine 0.5 on your computer
- Tips on how to avoid errors and viruses when downloading and installing Love Engine 0.5

H3: How to use Love Engine 0.5 for Grand Chase?
- How to open and run Love Engine 0.5 and Grand Chase simultaneously
- How to scan and edit values using Love Engine 0.5
- How to apply different hacks using Love Engine 0.5 for Grand Chase

H4: What are some of the best hacks you can do with Love Engine 0.5 for Grand Chase?
- Hyper Armor Hack: How to get unlimited invincibility using Love Engine 0.5
- Cooldown Hack: How to reduce the waiting time for your skills using Love Engine 0.5
- No Use MP/AP For Skill Hack: How to use your skills without consuming MP/AP using Love Engine 0.5
- Crit Hack: How to increase your critical rate and damage using Love Engine 0.5
- Damage Hack: How to boost your attack power using Love Engine 0.5
- MP Hack: How to refill your MP instantly using Love Engine 0.5
- HP Recovery Hack: How to heal yourself quickly using Love Engine 0.5

H2: What are the risks and precautions of using Love Engine 0.5 for Grand Chase?
- The possibility of getting detected and banned by the game's anti-cheat system
- The potential damage to your computer and data by malicious software or viruses
- The ethical and moral implications of cheating in an online game
- The tips on how to use Love Engine 0.5 safely and responsibly for Grand Chase

H1: Conclusion
- A summary of the main points of the article
- A call-to-action for the readers to try out Love Engine 0.5 for Grand Chase

**Table 2: Article with HTML formatting**

        What is Love Engine 0.5 and why do you need it for Grand Chase?

        -

        If you are a fan of Grand Chase, a popular side-scrolling MMORPG that features fast-paced action, colorful graphics, and diverse characters, you might have heard of a tool called Love Engine 0.5. This is a powerful cheat engine that allows you to modify various aspects of the game, such as your stats, skills, items, and more.

        -

        download love engine 0.5 grand chase


        Download Zip ———>>> https://tinourl.com/2uL2Oo



        -

        Love Engine 0.5 is a modified version of Cheat Engine, a well-known software that can scan and edit the memory of any process running on your computer. With Love Engine 0.5, you can access and manipulate the values of Grand Chase, such as your health, mana, damage, cooldown, etc.

        -

        By using Love Engine 0.5 for Grand Chase, you can enjoy many benefits, such as:

        -
          -
        • You can breeze through the game's stages and dungeons with ease.
        • -
        • You can defeat any boss or enemy with minimal effort.
        • -
        • You can unlock and upgrade any character or item you want.
        • -
        • You can impress and dominate other players in PvP mode.
        • -
        • You can have more fun and excitement in playing Grand Chase.
        • -
        -

        However, before you download and use Love Engine 0.5 for Grand Chase, you should also be aware of the risks and precautions involved in using this tool. In this article, we will guide you through everything you need to know about Love Engine 0.5 for Grand Chase, from how to download and install it, how to use it, what hacks you can do with it, and what dangers you should avoid.

        -

        How to download and install Love Engine 0.5 for Grand Chase?

        -

        The first step in using Love Engine 0.5 for Grand Chase is to download and install it on your computer. Here are the instructions on how to do it:

        -
          -
        1. Go to this link where you can find the download link for Love Engine 0.5.
        2. -
        3. Click on the download button and save the file on your computer.
        4. -
        5. Extract the file using WinRAR or any other extraction software.
        6. -
        7. You will see a folder named "LoveEngine" that contains four files: "CheatEngine.exe", "CheatEngine64.exe", "CheatEngine64.sys", and "CheatEngine.i18n".
        8. -
        9. Rename the file "CheatEngine.exe" to anything else, such as "LoveEngine.exe". This is to avoid detection by the game's anti-cheat system.
        10. -
        11. You can also rename the other files if you want, but make sure they have the same name as the main executable file.
        12. -
        13. You have successfully installed Love Engine 0.5 on your computer.
        14. -
        -

        Some tips on how to avoid errors and viruses when downloading and installing Love Engine 0.5 are:

        -


        -
          -
        • Make sure you download Love Engine 0.5 from a reliable source, such as the link we provided above.
        • -
        • Do not click on any suspicious or pop-up ads that might redirect you to malicious websites or downloads.
        • -
        • Scan your computer with an antivirus software before and after installing Love Engine 0.5.
        • -
        • Disable or whitelist Love Engine 0.5 from your antivirus software or firewall, as they might block or delete it as a false positive.
        • -
        -

        How to use Love Engine 0.5 for Grand Chase?

        -

        The next step in using Love Engine 0.5 for Grand Chase is to open and run it together with the game. Here are the steps on how to do it:

        -
          -
        1. Open Grand Chase Madness (the private server that we recommend for using Love Engine 0.5) from your desktop or launcher.
        2. -
        3. Login with your username and password and select a server.
        4. -
        5. Open LoveEngine.exe (or whatever name you gave it) from the folder where you installed it.
        6. -
        7. A window will pop up asking you if you want to run a tutorial or load an existing cheat table. Click on "No" or "Cancel".
        8. -
        9. You will see the main interface of Love Engine 0.5, which consists of several tabs, buttons, menus, and boxes.
        10. -
        11. In the upper left corner of the interface, click on the button that looks like a computer with a magnifying glass over it.
        12. -
        13. A window will appear that shows a list of processes running on your computer.
        14. -
        15. Select "main.exe" from the list (this is the process name of Grand Chase Madness) and click on "Open".
        16. -
        17. You have successfully attached Love Engine 0.5 to Grand Chase Madness.
        18. -
        -

The basic principle of using Love Engine 0.5 for Grand Chase is to scan and edit values using hexadecimal numbers (base-16 numbers that use the digits 0-9 and the letters A-F). For example, if you want to change your health value from 100% to 200%, you need to find out which hexadecimal numbers correspond to those percentages (in this case, 64 for 100% and C8 for 200%) and then change them accordingly in Love Engine 0.5.
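If you are unsure which hexadecimal number matches a given decimal value, a quick script can do the conversion for you. This is only a small illustrative Python sketch of the decimal-to-hex mapping mentioned above (100 → 64, 200 → C8); it does not interact with the game or with Love Engine 0.5 in any way.

```python
# Convert the decimal values you see in the game into the hexadecimal
# numbers described above, and convert back to check.
values = [100, 200]  # e.g. health at 100% and at 200%

for v in values:
    print(f"decimal {v} -> hex {v:X}")

for h in ["64", "C8"]:
    print(f"hex {h} -> decimal {int(h, 16)}")
```

Running this prints `decimal 100 -> hex 64` and `decimal 200 -> hex C8`, which are exactly the numbers used in the example.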

To scan and edit values using Love Engine 0.5, you need to follow these steps:

        -
1. In the upper right corner of the interface, enter the hexadecimal number of the value you want to scan in the box labeled "Value:". For example, if you want to scan your health value, enter 64 (assuming your health is at 100%).
2. Select the value type from the drop-down menu next to the box. For most values, you should select "4 bytes". For some values, such as cooldown, you should select "Float".
3. Click on "First Scan" and wait for the scan to finish. You will see a list of addresses and values on the left side of the interface. These are the possible matches for your scan criteria.
4. Increase or decrease the value in the game (for example, by using a skill or taking damage), enter the new number, and click on "Next Scan". Repeat this until only one or a few addresses remain; you can then double-click an address to change its value and tick the box next to it to freeze it.

          What are some of the best hacks you can do with Love Engine 0.5 for Grand Chase?

          -

          Now that you know how to use Love Engine 0.5 for Grand Chase, you might be wondering what hacks you can do with it. There are many hacks you can try with Love Engine 0.5, but here are some of the best ones that we recommend:

          -
          Hyper Armor Hack: How to get unlimited invincibility using Love Engine 0.5
          -

          Hyper Armor is a feature that makes your character immune to knockback and flinching when hit by enemies or other players. With this hack, you can make your Hyper Armor last forever, making you invincible to any attack.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Go to Practice Mode and select Elesis as your character.
          2. -
          3. Go to the Skill Tree tab and select the Shared tab.
          4. -
          5. Get the Hyper Armor skill and equip it on your skill slot C.
          6. -
          7. Use the Hyper Armor skill and scan for 4278190080 using Love Engine 0.5.
          8. -
          9. Use the Hyper Armor skill again and scan for 4278190081 using Love Engine 0.5.
          10. -
          11. You should get one address that matches your scan criteria.
          12. -
          13. Change the value of that address to 4278190081 and freeze it.
          14. -
          15. You have successfully activated the Hyper Armor hack.
          16. -
          -

          With this hack, you can enjoy unlimited Hyper Armor and never worry about getting hit by anything. However, be careful not to use this hack in PvP mode, as it might get you reported and banned by other players.

          -
          Cooldown Hack: How to reduce the waiting time for your skills using Love Engine 0.5
          -

          Cooldown is the time that you have to wait before you can use your skills again after using them. With this hack, you can reduce the cooldown time to zero, allowing you to spam your skills as much as you want.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Go to Practice Mode and select Dio as your character.
          2. -
          3. Go to the Skill Tree tab and select the Leviathan tab.
          4. -
          5. Get the Time Curse Enhancer skill at level 3 and equip it on your skill slot Z.
          6. -
          7. Go to the Second Job tab and get the Far Away skill and equip it on your skill slot X.
          8. -
          9. Use the Far Away skill until your Time Curse Enhancer meter is full.
          10. -
          11. Scan for 1082130432 using Love Engine 0.5 with Float as the value type.
          12. -
          13. Wait for the Time Curse Enhancer meter to reach zero and scan for 0 using Love Engine 0.5 with Float as the value type.
          14. -
          15. You should get one address that matches your scan criteria.
          16. -
          17. Change the value of that address to 1982130432 and freeze it.
          18. -
          19. You have successfully activated the Cooldown hack.
          20. -
          -

          With this hack, you can use your skills without any cooldown time and unleash powerful combos on your enemies. However, be careful not to use this hack in PvP mode, as it might get you reported and banned by other players.

          -
          No Use MP/AP For Skill Hack: How to use your skills without consuming MP/AP using Love Engine 0.5
          -

          MP and AP are resources that you need to use your skills in Grand Chase. MP is used for basic skills and AP is used for special skills. With this hack, you can use your skills without consuming any MP or AP, making them free to use.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Use the same address that you used for the Cooldown hack (the one with Float as the value type).
          2. -
          3. Add a new address manually by subtracting 6 from the address of the Cooldown hack. For example, if the address of the Cooldown hack is 044567, then add a new address with 044561.
          4. -
          5. This new address should have a value of 16256. Change it to 1500 and freeze it.
          6. -
          7. You have successfully activated the No Use MP/AP For Skill hack.
          8. -
          -

          With this hack, you can use your skills without worrying about running out of MP or AP. However, be careful not to use this hack in PvP mode, as it might get you reported and banned by other players.

          -
          Crit Hack: How to increase your critical rate and damage using Love Engine 0.5
          -

          Critical is a feature that makes your attacks deal extra damage based on a certain percentage. With this hack, you can increase your critical rate and damage to maximum, making every hit a critical hit.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Go to Practice Mode and select any character with a third job (such as Lire).
          2. -
          3. Scan for 1008981942 using Love Engine 0.5. You should get one address that matches your scan criteria.
          4. -
          5. Scan for 1008981770 using Love Engine 0.5. You should get many addresses that match your scan criteria.
          6. -
          7. Change your job to first job (such as Archer) and scan for 0 using Love Engine 0.5.
          8. -
          9. You should get two addresses that match your scan criteria: one with a value of 188 and one with a value of 0.
          10. -
          11. Change the value of the address with 188 to 1071332795 and freeze it.
          12. -
          13. Change the value of the address with 0 to 1071332615 and freeze it.
          14. -
          15. You have successfully activated the Crit hack.
          16. -
          -

          With this hack, you can deal massive damage with every attack and make your enemies fall in one hit. However, be careful not to use this hack in PvP mode, as it might get you reported and banned by other players.

          -
Damage Hack: How to boost your attack power using Love Engine 0.5
          -

          Damage is the amount of harm that you inflict on your enemies with your attacks. With this hack, you can boost your damage to a very high level, making your attacks more powerful and effective.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Go to Practice Mode and select Ronan as your character.
          2. -
          3. Go to the Special tab in the Skill Tree and get the Holy Bless skill and equip it on your skill slot Z.
          4. -
          5. Use the Holy Bless skill and scan for 1036831774 using Love Engine 0.5. You should get one address that matches your scan criteria.
          6. -
          7. Scan for 1036831949 using Love Engine 0.5. You should get many addresses that match your scan criteria.
          8. -
          9. Wait for the Holy Bless skill to wear off and scan for 211 using Love Engine 0.5 for the first address and 0 for the second address.
          10. -
          11. You should get two addresses that match your scan criteria: one with a value of 211 and one with a value of 0.
          12. -
          13. Change the value of the address with 211 to 1077936339 and freeze it.
          14. -
          15. Change the value of the address with 0 to 1077936128 and freeze it.
          16. -
          17. You have successfully activated the Damage hack.
          18. -
          -

          With this hack, you can increase your attack power significantly and deal huge damage to your enemies. However, be careful not to use this hack in PvP mode, as it might get you reported and banned by other players.

          -
          MP Hack: How to refill your MP instantly using Love Engine 0.5
          -

          MP is the resource that you need to use your basic skills in Grand Chase. With this hack, you can refill your MP instantly whenever you want, allowing you to use your skills without any limitation.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Go to Practice Mode and select Lire as your character.
          2. -
          3. Go to the Skill Tree tab and select the MP Recovery skill and equip it on your skill slot X.
          4. -
          5. Use the MP Recovery skill by pressing X+UP and scan for 983386500 using Love Engine 0.5.
          6. -
          7. You should get one address that matches your scan criteria.
          8. -
          9. Scan for 983386450 using Love Engine 0.5. You should get one address that matches your scan criteria (if you get many addresses, wait for the MP Recovery skill to wear off and scan for 0).
          10. -
          11. If you want to change the value of your MP, do it in the server selection screen (not in Practice Mode) or you will get detected by the anti-cheat system.
          12. -
          13. Change the value of the first address to 1983386500 and freeze it.
          14. -
          15. Change the value of the second address to 1983386450 and freeze it.
          16. -
          17. You have successfully activated the MP hack.
          18. -
          -

          With this hack, you can refill your MP instantly and use your skills as much as you want. However, be careful not to use this hack in PvP mode, as it might get you reported and banned by other players.

          -
          HP Recovery Hack: How to heal yourself quickly using Love Engine 0.5
          -

          HP is the resource that represents your health in Grand Chase. With this hack, you can heal yourself quickly whenever you want, allowing you to survive any damage or injury.

          -

          To do this hack, you need to follow these steps:

          -
            -
          1. Go to Practice Mode and select Amy as your character.
          2. -
          3. Scan for 1065353216 using Love Engine 0.5.
          4. -
          5. Use your third skill by pressing Z+DOWN and scan for 1073741824 using Love Engine 0.5.
          6. -
          7. Wait for the skill effect to wear off and scan for 1065353216 again using Love Engine 0.5.
          8. -
          9. You should get one address that matches your scan criteria.
          10. -

            What are the risks and precautions of using Love Engine 0.5 for Grand Chase?

            -

            While using Love Engine 0.5 for Grand Chase can be fun and rewarding, it also comes with some risks and drawbacks that you should be aware of before using it. Some of the risks and precautions of using Love Engine 0.5 for Grand Chase are:

            -

            The possibility of getting detected and banned by the game's anti-cheat system

            -

            Grand Chase Madness, the private server that we recommend for using Love Engine 0.5, has an anti-cheat system that monitors and detects any abnormal activity or modification in the game. If you are caught using Love Engine 0.5 or any other cheat tool, you might get detected and banned by the anti-cheat system, which means you will lose your account and progress in the game.

            -

            To avoid getting detected and banned by the anti-cheat system, you should follow these tips:

            -
              -
            • Do not use Love Engine 0.5 in PvP mode, as other players might notice and report you.
            • -
            • Do not use Love Engine 0.5 in public dungeons or raids, as other players might notice and report you.
            • -
            • Do not use Love Engine 0.5 in events or missions that have leaderboards or rankings, as your scores might be suspicious and reported.
            • -
            • Do not use Love Engine 0.5 excessively or blatantly, as it might trigger the anti-cheat system or raise suspicion from other players.
            • -
            • Do not share your account or password with anyone, as they might use Love Engine 0.5 on your account and get you banned.
            • -
            • Do not download or use any other cheat tool or mod that might interfere with Love Engine 0.5 or the anti-cheat system.
            • -
            -

            The potential damage to your computer and data by malicious software or viruses

            -

            Love Engine 0.5 is a modified version of Cheat Engine, which is a software that can scan and edit the memory of any process running on your computer. This means that Love Engine 0.5 can potentially access and modify any data or file on your computer, which might cause damage or corruption to your system or personal information.

            -

            To avoid any damage or virus infection to your computer and data by Love Engine 0.5, you should follow these tips:

            -
              -
            • Make sure you download Love Engine 0.5 from a reliable source, such as the link we provided above.
            • -
            • Do not click on any suspicious or pop-up ads that might redirect you to malicious websites or downloads.
            • -
            • Scan your computer with an antivirus software before and after installing Love Engine 0.5.
            • -
            • Disable or whitelist Love Engine 0.5 from your antivirus software or firewall, as they might block or delete it as a false positive.
            • -
            • Backup your important data and files regularly, in case of any loss or corruption caused by Love Engine 0.5.
            • -
            -

            The ethical and moral implications of cheating in an online game

            -

            Love Engine 0.5 is a cheat tool that gives you an unfair advantage over other players in Grand Chase Madness, which is an online game that involves cooperation and competition with other players. By using Love Engine 0.5, you are violating the rules and spirit of the game, which might ruin the fun and enjoyment for yourself and others.

            -

            To avoid any ethical and moral issues of cheating in an online game, you should follow these tips:

            -
              -
            • Use Love Engine 0.5 only for personal entertainment and experimentation, not for malicious or greedy purposes.
            • -
            • Use Love Engine 0.5 only in solo mode or with friends who consent to it, not with strangers who might be offended or harmed by it.
            • -
            • Use Love Engine 0.5 only in moderation and balance, not in excess or abuse that might make the game boring or meaningless for you.
            • -
            • Respect other players and their choices, whether they use Love Engine 0.5 or not, and do not judge or harass them for it.
            • -
            • Acknowledge the risks and consequences of using Love Engine 0.5, and take responsibility for your actions if you get caught or banned by it.
            • -
            -

            The tips on how to use Love Engine 0.5 safely and responsibly for Grand Chase

            -

            To summarize, here are some tips on how to use Love Engine 0.5 safely and responsibly for Grand Chase:

            -
              -
            • Download Love Engine 0.5 from a reliable source and scan it with an antivirus software before and after installing it.
            • -
            • Rename the files of Love Engine 0.5 to avoid detection by the anti-cheat system.
            • -
            • Attach Love Engine 0.5 to main.exe (the process name of Grand Chase Madness) using the computer button on the interface.
• Scan and edit values using hexadecimal numbers with the appropriate value type (such as 4 bytes or Float).
• -
            • Click on First Scan and Next Scan to narrow down the list of addresses and values until you find the one you want to edit.
            • -
            • Double-click on the address or value you want to edit and change it to your desired value in the bottom box.
            • -
            • Check the box next to the address or value you want to edit to freeze it and prevent it from changing.
            • -
            • Enjoy your hacked value and repeat the process for other values you want to hack.
            • -
            -

            Conclusion

            -

            In conclusion, Love Engine 0.5 is a powerful cheat tool that can help you modify various aspects of Grand Chase Madness, such as your stats, skills, items, and more. By using Love Engine 0.5, you can enjoy many benefits, such as breezing through the game's stages and dungeons, defeating any boss or enemy, unlocking and upgrading any character or item, impressing and dominating other players in PvP mode, and having more fun and excitement in playing Grand Chase.

            -

            However, you should also be aware of the risks and precautions involved in using Love Engine 0.5, such as getting detected and banned by the game's anti-cheat system, damaging your computer and data by malicious software or viruses, violating the rules and spirit of the game, and ruining the fun and enjoyment for yourself and others. To avoid these risks and precautions, you should follow some tips on how to use Love Engine 0.5 safely and responsibly for Grand Chase, such as downloading it from a reliable source, renaming its files, attaching it to main.exe, scanning and editing values using hexadecimal numbers with appropriate value types, clicking on First Scan and Next Scan, changing and freezing values, and using it only for personal entertainment and experimentation, not for malicious or greedy purposes.

            -

            We hope that this article has helped you learn everything you need to know about Love Engine 0.5 for Grand Chase. If you have any questions or feedback, feel free to leave a comment below or join our Discord server. Happy hacking!

            -

            FAQs

            -

            Here are some frequently asked questions about Love Engine 0.5 for Grand Chase:

            -

            Q: Is Love Engine 0.5 safe to use?

            -

            A: Love Engine 0.5 is safe to use if you download it from a reliable source (such as the link we provided above), scan it with an antivirus software before and after installing it, disable or whitelist it from your antivirus software or firewall, backup your important data and files regularly, and use it only in solo mode or with friends who consent to it.

            -

            Q: Is Love Engine 0.5 legal to use?

            -

            A: Love Engine 0.5 is not legal to use according to the terms of service of Grand Chase Madness (the private server that we recommend for using Love Engine 0.5), which prohibit any form of cheating or hacking in the game. If you are caught using Love Engine 0.5 or any other cheat tool by the game's anti-cheat system or by other players who report you, you might get detected and banned by the game's staff, which means you will lose your account and progress in the game.

            -

            Q: Is Love Engine 0.5 ethical to use?

            -

            A: Love Engine 0.5 is not ethical to use according to the rules and spirit of Grand Chase Madness (the private server that we recommend for using Love Engine 0.5), which promote fair play and cooperation among players. By using Love Engine 0.5 or any other cheat tool, you are violating these rules and spirit, which might ruin the fun and enjoyment for yourself and others who play the game legitimately.

            -

            Q: Is Love Engine 0.5 compatible with other versions of Grand Chase?

            -

            A: Love Engine 0.5 is compatible with other versions of Grand Chase that use main.exe as their process name (such as Grand Chase History), but we do not recommend using it on them because they might have different anti-cheat systems or values that might cause errors or crashes when using Love Engine 0.5.

            -

            Q: Is Love Engine 0.5 updated regularly?

A: Love Engine 0.5 is updated from time to time by the community. You can find these updates on some websites or forums dedicated to Grand Chase Madness or Love Engine 0.5 (such as GameBanana). However, you should be careful when downloading and using these updates, as they might contain viruses or malware that could harm your computer or data.

            -


            -

            Thank you for reading this article on how to download and use Love Engine 0.5 for Grand Chase. I hope you found it informative and helpful. If you have any questions or feedback, feel free to leave a comment below or join our Discord server. Happy hacking!

            -
            -
            \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Free Payslip Template Download South Africa A Guide for Employers and Employees.md b/spaces/raedeXanto/academic-chatgpt-beta/Free Payslip Template Download South Africa A Guide for Employers and Employees.md deleted file mode 100644 index 5f552cb386ab3c10104d877ead54a99a6e10cba7..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Free Payslip Template Download South Africa A Guide for Employers and Employees.md +++ /dev/null @@ -1,91 +0,0 @@ -
            -

            Free Payslip Template Download South Africa

            -

            A payslip is a document that shows how much an employee has earned and how much has been deducted from their pay. It is also known as a pay stub, pay slip, salary slip, or wage slip. A payslip is important for both employers and employees, as it provides a clear and transparent record of the payment details and helps with tax compliance, budgeting, and dispute resolution.

            -

            Free Payslip Template Download South Africa


            Download ✯✯✯ https://tinourl.com/2uL5oM



            -

            In South Africa, there are legal requirements for issuing payslips to employees. According to the Basic Conditions of Employment Act (BCEA), every employer must give each employee a written payslip at every pay period, whether weekly, fortnightly, or monthly. The payslip must contain certain information, such as the employer's name and address, the employee's name and occupation, the period of payment, the gross pay, the deductions, the net pay, and any other relevant information.

            -

            If you are an employer in South Africa, you may be wondering how to create a payslip for your employees. In this article, we will guide you through the process of creating a payslip, what information to include, how to calculate net pay and tax withholding estimates, and where to find free and editable payslip templates online.

            -

            How to create a payslip for your employees

            -

            Creating a payslip for your employees is not difficult if you follow some simple steps. Here are some of the things you need to consider when creating a payslip:

            -

            What information should be included in a payslip?

            -

            A payslip should include the following information:

- The employer's name and address
- The employee's name and occupation
- The employee's identification number (ID) or tax number
- The period of payment (e.g., weekly, fortnightly, monthly)
- The number of hours worked or days worked (if applicable)
- The gross pay (the total amount earned before deductions)
- The deductions (the amounts taken out of the gross pay for various reasons)
- The net pay (the amount left after deductions)
- Any other relevant information (e.g., overtime pay, bonuses, commissions, allowances, leave pay, etc.)

            What are the common deductions and contributions from a payslip?

            -

            Deductions are amounts that are taken out of the gross pay for various reasons. Some deductions are mandatory by law, while others are voluntary or agreed upon by the employer and employee. Some of the common deductions and contributions from a payslip are:

- Income tax (PAYE): This is the tax that is withheld from the employee's income by the employer and paid to the South African Revenue Service (SARS). The amount of income tax depends on the employee's taxable income bracket and tax credits.
- Unemployment Insurance Fund (UIF): This is a fund that provides benefits to workers who become unemployed due to various reasons. Both employers and employees contribute 1% of their earnings to this fund each month.
- Skills Development Levy (SDL): This is a levy that is paid by employers who have an annual payroll of more than R500,000 to fund education and training programs for workers. Employers pay 1% of their total payroll to this levy each month.
- Retirement fund contributions: These are contributions that are made by employers and/or employees to a pension fund or provident fund that provides retirement benefits to workers. The amount of contribution depends on the type of fund and the agreement between the employer and employee.
- Medical aid contributions: These are contributions that are made by employers and/or employees to a medical scheme that provides health care benefits to workers. The amount of contribution depends on the type of scheme and the agreement between the employer and employee.
- Other deductions: These are deductions that are made for other reasons, such as loan repayments, garnishee orders, union fees, donations, etc. The amount of deduction depends on the nature and terms of each deduction.

            How to calculate net pay and tax withholding estimates?

            -

            Net pay is the amount that is left after all deductions have been taken out of the gross pay. To calculate net pay, you need to subtract all deductions from the gross pay. For example:

            -
Gross pay = R20,000
Income tax = R3,000
UIF = R200
SDL = R200
Retirement fund contribution = R1,000
Medical aid contribution = R500
Other deductions = R500
Net pay = R20,000 - R3,000 - R200 - R200 - R1,000 - R500 - R500
Net pay = R14,600
            -

            Tax withholding estimates are amounts that are withheld from the employee's income by the employer and paid to SARS as income tax. To calculate tax withholding estimates, you need to use the tax tables provided by SARS for each tax year. You can also use online tools such as SARS Tax Calculator or TaxTim Income Tax Calculator to estimate your tax liability.
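To make the arithmetic concrete, here is a minimal Python sketch that reproduces the worked example above. The deduction amounts are the illustrative figures from the example, not real tax tables; for actual PAYE amounts you would still consult the SARS tax tables or one of the calculators mentioned.

```python
# Reproduce the worked example: net pay = gross pay minus all deductions.
gross_pay = 20_000.00

deductions = {
    "Income tax (PAYE)": 3_000.00,
    "UIF": 200.00,
    "SDL": 200.00,
    "Retirement fund contribution": 1_000.00,
    "Medical aid contribution": 500.00,
    "Other deductions": 500.00,
}

net_pay = gross_pay - sum(deductions.values())

print(f"Gross pay: R{gross_pay:,.2f}")
for name, amount in deductions.items():
    print(f"  less {name}: R{amount:,.2f}")
print(f"Net pay: R{net_pay:,.2f}")  # R14,600.00, matching the example above
```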

            -

            Free payslip template download South Africa

            -

            If you want to save time and hassle when creating payslips for your employees, you can use a free payslip template that you can download online. A free payslip template is a pre-formatted document that contains all the necessary fields and calculations for generating a payslip. All you need to do is fill in your own information and print or email it to your employees.

            -

            Where to find free and editable payslip templates online?

            -

            There are many websites that offer free and editable payslip templates online. Some of them are:

            -


- Xero: This website provides a free payslip template as an editable PDF file. You can also use their online payroll software to streamline your payroll process.
- XL Templates: This website provides a free South African payslip format as an Excel worksheet. You can customize it according to your needs and preferences.
- PaySpace: This website provides free sample payslips as PDF files. You can also use their cloud-based payroll software to manage your payroll efficiently.

            How to use a payslip template to generate payslips for your employees?

            -

            To use a payslip template to generate payslips for your employees, you need to follow these steps:

- Download or open the payslip template file from your preferred website.
- Fill in your company name, address, logo, contact details, etc.
- Fill in your employee's name, ID number or tax number, occupation, period of payment, hours worked or days worked (if applicable), gross pay, deductions, net pay, and any other relevant information.
- Check if all calculations are correct and accurate.
- Save or print or email the payslip to your employee.
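If you generate many payslips, the fill-in steps above can also be scripted. The sketch below is only an illustration with made-up employee details; it simply formats the fields listed earlier into a plain-text slip and reuses the same net-pay arithmetic as the worked example.

```python
def render_payslip(company, employee, period, gross_pay, deductions):
    """Format basic payslip fields into a plain-text slip."""
    net_pay = gross_pay - sum(deductions.values())
    lines = [
        f"Employer: {company}",
        f"Employee: {employee}",
        f"Pay period: {period}",
        f"Gross pay: R{gross_pay:,.2f}",
    ]
    for name, amount in deductions.items():
        lines.append(f"  Deduction - {name}: R{amount:,.2f}")
    lines.append(f"Net pay: R{net_pay:,.2f}")
    return "\n".join(lines)

# Hypothetical example data for illustration only.
print(render_payslip(
    company="Example (Pty) Ltd",
    employee="J. Mokoena (ID 0000000000000)",
    period="1-31 March",
    gross_pay=20_000.00,
    deductions={"Income tax (PAYE)": 3_000.00, "UIF": 200.00},
))
```

A script like this only fills in and formats the fields; you would still check the figures against the SARS tax tables and the BCEA requirements before issuing the payslip.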

            What are the benefits of using a payslip template?

            -

            Using a payslip template has many benefits for both employers and employees. Some of them are:

- - It saves time and effort by automating calculations and formatting.
- It ensures consistency and accuracy by following standard formats and formulas.
- It reduces errors and disputes by providing clear and transparent records of payment details.
- It enhances compliance by meeting legal requirements for issuing payslips.
- It improves communication and trust by showing respect and appreciation for employees' work.

            Conclusion

- A payslip is a document that shows how much an employee has earned and how much has been deducted from their pay. It is important for both employers and employees to issue and receive payslips, as they provide a clear and transparent record of the payment details and help with tax compliance, budgeting, and dispute resolution.

            -

            In this article, we have guided you through the process of creating a payslip for your employees, what information to include, how to calculate net pay and tax withholding estimates, and where to find free and editable payslip templates online. We hope that this article has been helpful and informative for you.

            -

            Here are some tips and advice for employers and employees on payslips:

- - Employers should issue payslips to their employees at every pay period, whether weekly, fortnightly, or monthly, as required by law.
- Employees should check their payslips carefully and report any errors or discrepancies to their employers as soon as possible.
- Employers and employees should keep copies of their payslips for at least five years for tax purposes and reference.
- Employers and employees should consult a professional accountant or tax advisor if they have any questions or doubts about their payslips or tax obligations.

            FAQs

            -

            What is the difference between a salary slip and a wage slip?

            -

            A salary slip and a wage slip are both types of payslips that show how much an employee has earned and how much has been deducted from their pay. The main difference between them is that a salary slip is issued to an employee who receives a fixed amount of money per month, regardless of the number of hours or days worked, while a wage slip is issued to an employee who receives a variable amount of money per hour or day worked.

            -

            How often should I issue payslips to my employees?

            -

            According to the Basic Conditions of Employment Act (BCEA), every employer must give each employee a written payslip at every pay period, whether weekly, fortnightly, or monthly. The pay period is the interval of time between two consecutive payments to an employee.

            -

            How can I check if my payslip is accurate and compliant?

            -

            To check if your payslip is accurate and compliant, you can use online tools such as SARS Tax Calculator or TaxTim Income Tax Calculator to estimate your tax liability and compare it with your income tax deduction on your payslip. You can also check if your payslip contains all the required information as specified by the BCEA. If you find any errors or discrepancies on your payslip, you should report them to your employer as soon as possible.
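As a rough illustration of that comparison, the snippet below checks the PAYE figure printed on a payslip against an estimate obtained separately from one of those calculators. The figures and the R5 tolerance are arbitrary placeholders, and the snippet does not compute the estimate itself.

```python
# Sketch: compare the PAYE shown on a payslip with an estimate obtained from an
# online calculator (for example SARS or TaxTim). Figures and the tolerance are
# placeholders chosen for illustration only.

def paye_matches(payslip_paye, estimated_paye, tolerance=5.00):
    """Return True if the payslip deduction is within `tolerance` rand of the estimate."""
    difference = abs(payslip_paye - estimated_paye)
    if difference > tolerance:
        print(f"Possible discrepancy: payslip shows R{payslip_paye:.2f}, "
              f"estimate is R{estimated_paye:.2f} (difference R{difference:.2f}).")
        return False
    print("PAYE on the payslip matches the estimate within the tolerance.")
    return True

paye_matches(payslip_paye=2480.00, estimated_paye=2500.00)
```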

            -

            What should I do if I find an error or discrepancy in my payslip?

            -

            If you find an error or discrepancy in your payslip, you should report it to your employer as soon as possible. You should also keep a copy of your payslip and any supporting documents that can prove the error or discrepancy. Your employer should investigate the matter and correct the error or discrepancy within a reasonable time. If your employer fails to do so, you can lodge a complaint with the Department of Labour or seek legal advice.

            -

            Can I get a payslip if I am self-employed or work as a freelancer?

            -

            If you are self-employed or work as a freelancer, you may not receive a payslip from your clients or customers. However, you can still create your own payslip using a free payslip template online. This can help you keep track of your income and expenses, manage your cash flow, and prepare your tax returns.

            -

            0a6ba089eb
            -
            -
            \ No newline at end of file diff --git a/spaces/raedeXanto/academic-chatgpt-beta/Gta Vice City Deluxe Full Version Pc Downloadinstmanksl Experience the Ultimate Vice City with this Mod.md b/spaces/raedeXanto/academic-chatgpt-beta/Gta Vice City Deluxe Full Version Pc Downloadinstmanksl Experience the Ultimate Vice City with this Mod.md deleted file mode 100644 index 6ed16c34a92d8be569e4fd1bc5205a825f28e291..0000000000000000000000000000000000000000 --- a/spaces/raedeXanto/academic-chatgpt-beta/Gta Vice City Deluxe Full Version Pc Downloadinstmanksl Experience the Ultimate Vice City with this Mod.md +++ /dev/null @@ -1,160 +0,0 @@ -
            -

            Gta Vice City Deluxe Full Version Pc Downloadinstmanksl

            -

            Are you a fan of the classic Grand Theft Auto: Vice City game? Do you want to experience it with a new twist and more fun? If yes, then you should try Gta Vice City Deluxe, a total conversion mod that adds tons of new features and improvements to the original game. In this article, we will tell you everything you need to know about this amazing mod, including what it is, why you should play it, what are its features, how to download and install it, and some FAQs.

            -

            Gta Vice City Deluxe Full Version Pc Downloadinstmanksl


            Download Zip –––––>>> https://tinourl.com/2uKZdi



            -

            Introduction

            -

            Grand Theft Auto: Vice City is one of the most popular and influential games of all time. Released in 2002 by Rockstar Games, it is the fourth installment in the Grand Theft Auto series and the second one to use a 3D engine. The game is set in 1986 in a fictional city based on Miami, Florida, where you play as Tommy Vercetti, a former mobster who is sent to Vice City by his boss to establish a criminal empire. The game features an open-world environment where you can explore, drive, shoot, fight, and do various missions and side activities.

            -

            However, as great as the game is, it is not perfect. Some of the graphics, vehicles, buildings, and weapons are outdated or unrealistic. Some of the map areas are empty or boring. Some of the gameplay elements are repetitive or frustrating. That's why some modders decided to create Gta Vice City Deluxe, a mod that enhances and expands the game in every possible way.

            -

            What is Gta Vice City Deluxe?

            -

            Gta Vice City Deluxe is a total conversion mod for Gta Vice City's PC version that was released in 2007. It is inspired by the realGTA3 mod for Gta III, which also improved the graphics and gameplay of that game. Gta Vice City Deluxe replaces over 90 vehicles with realistic high-resolution ones, adds tons of new buildings and map changes around the city, introduces new weapons and stunts, and makes many other tweaks and fixes to make the game more enjoyable.

            -

            Gta Vice City Deluxe is not an official update or patch from Rockstar Games. It is a fan-made project that requires the original game to work. It is also not compatible with other mods or save files from the vanilla game. You will need to start a new game or use a compatible save file to play Gta Vice City Deluxe.

            -

            Why should you play Gta Vice City Deluxe?

            -

            If you love Gta Vice City and want to experience it in a new way, then you should definitely play Gta Vice City Deluxe. It will make you feel like you are playing a remastered or definitive edition of the game with better graphics, more content, and more fun. You will be amazed by how much the mod changes and improves the game without losing its original charm and spirit.

            -

            Some of the reasons why you should play Gta Vice City Deluxe are:

            -
              -
            • You will enjoy driving new cars, bikes, boats, helicopters, planes, and other vehicles that look more realistic and detailed than ever before.
            • -
            • You will explore new areas and buildings that add more variety and life to the city.
            • -
            • You will use new weapons such as flamethrowers, rocket launchers, sniper rifles, grenades, and more that add more excitement and challenge to the combat.
            • -
            • You will perform new stunts such as jumping ramps, grinding rails, doing flips, and more that add more fun and creativity to the gameplay.
            • -
            • You will experience many other changes such as improved lighting and weather effects, new radio stations and music tracks, new pedestrians and cops, new animations and sounds, and more that make the game more immersive and realistic.
            • -
            -

            Features of Gta Vice City Deluxe

            -

            Gta Vice City Deluxe has many features that make it one of the best mods for Gta Vice City. Here are some of them:

            -

            Gta Vice City Deluxe Pc Game Free Download Full
            -Download Gta Vice City Deluxe Mod For Pc
            -How To Install Gta Vice City Deluxe On Pc
            -Gta Vice City Deluxe Cheats Codes For Pc
            -Gta Vice City Deluxe Pc Requirements
            -Gta Vice City Deluxe Pc Gameplay
            -Gta Vice City Deluxe Pc Download Utorrent
            -Gta Vice City Deluxe Pc Highly Compressed
            -Gta Vice City Deluxe Pc Trainer
            -Gta Vice City Deluxe Pc Mods
            -Gta Vice City Deluxe Pc Crack
            -Gta Vice City Deluxe Pc Save Game
            -Gta Vice City Deluxe Pc Online
            -Gta Vice City Deluxe Pc Patch
            -Gta Vice City Deluxe Pc Windows 10
            -Gta Vice City Deluxe Pc Review
            -Gta Vice City Deluxe Pc Keyboard Controls
            -Gta Vice City Deluxe Pc Tips And Tricks
            -Gta Vice City Deluxe Pc Sound Fix
            -Gta Vice City Deluxe Pc Graphics Settings
            -Gta Vice City Deluxe Pc Cars List
            -Gta Vice City Deluxe Pc Missions Guide
            -Gta Vice City Deluxe Pc Easter Eggs
            -Gta Vice City Deluxe Pc Secrets
            -Gta Vice City Deluxe Pc Glitches
            -Gta Vice City Deluxe Pc Map
            -Gta Vice City Deluxe Pc Radio Stations
            -Gta Vice City Deluxe Pc Weapons List
            -Gta Vice City Deluxe Pc Skins
            -Gta Vice City Deluxe Pc Multiplayer Mod
            -Gta Vice City Deluxe Pc No Cd
            -Gta Vice City Deluxe Pc Rar Password
            -Gta Vice City Deluxe Pc Ocean Of Games
            -Gta Vice City Deluxe Pc Apunkagames
            -Gta Vice City Deluxe Pc Softonic
            -Gta Vice City Deluxe Pc Steam
            -Gta Vice City Deluxe Pc Rockstar Games
            -Gta Vice City Deluxe Full Version Free Download For Android
            -Download And Install Gta Vice City Deluxe Full Version For Mobile
            -How To Play Gta Vice City Deluxe Full Version On Smartphone
            -Best Settings For Gta Vice City Deluxe Full Version On Phone
            -How To Use Cheats In Gta Vice City Deluxe Full Version On Mobile
            -How To Transfer Save Data From Pc To Mobile For Gta Vice City Deluxe Full Version
            -How To Connect Controller To Mobile For Playing Gta Vice City Deluxe Full Version
            -How To Fix Lag In Gta Vice City Deluxe Full Version On Mobile
            -How To Update Gta Vice City Deluxe Full Version On Mobile
            -How To Download Mods For Gta Vice City Deluxe Full Version On Mobile
            -How To Change Language In Gta Vice City Deluxe Full Version On Mobile
            -How To Unlock All Missions In Gta Vice City Deluxe Full Version On Mobile
            -How To Get Unlimited Money In Gta Vice City Deluxe Full Version On Mobile

            -

            Over 90 new vehicles with realistic graphics

            -

One of the main features of Gta Vice City Deluxe is that it replaces over 90 vehicles from the original game with new ones that have high-resolution textures and models. These vehicles include cars, bikes, boats, helicopters, planes, and other vehicles that look more realistic and detailed than ever before. Some of the new vehicles are based on real-life models, such as the Ferrari Testarossa (Cheetah), the Lamborghini Countach (Infernus), the Porsche 911 (Comet), the DeLorean DMC-12 (Deluxo), and the Hummer H1 (Patriot). Some of the new vehicles are also unique to the mod, such as the Hotring Racer, the Bloodring Banger, the Love Fist limo, and the Hunter helicopter.

            -

            Tons of new buildings and map changes

            -

            Another feature of Gta Vice City Deluxe is that it adds tons of new buildings and map changes around the city. These include realistic skyscrapers, hotels, casinos, shops, restaurants, clubs, stadiums, parks, bridges, and more. Some of the new buildings are based on real-life landmarks in Miami, such as the Freedom Tower (Vice Point Langer), the Biltmore Hotel (Ocean View Hotel), the Fontainebleau Hotel (Malibu Club), and the Miami Seaquarium (Vice City Aquarium). Some of the new buildings are also unique to the mod, such as the skatepark, the motocross track, the golf course, and the airport terminal.

            -

            New weapons and stunts

            -

            The third feature of Gta Vice City Deluxe is that it introduces new weapons and stunts to the game. The mod adds 18 new weapons to the game, such as flamethrowers, rocket launchers, sniper rifles, grenades, and more. These weapons add more excitement and challenge to the combat and allow you to cause more mayhem and destruction in the city. The mod also adds new stunts to the game, such as jumping ramps, grinding rails, doing flips, and more. These stunts add more fun and creativity to the gameplay and allow you to show off your skills and earn extra money.

            -

            How to download and install Gta Vice City Deluxe

            -

            If you are interested in playing Gta Vice City Deluxe, you will need to download and install it on your PC. Here are the steps you need to follow:

            -

            Requirements and compatibility

            -

            Before you download and install Gta Vice City Deluxe, you will need to make sure that your PC meets the minimum requirements for running the mod. These are:

            -
              -
            • A 64-bit processor and operating system
            • -
            • Windows 10
            • -
            • Intel Core i5-6600K / AMD FX-6300 or equivalent processor
            • -
            • 8 GB of RAM
            • -
            • Nvidia GeForce GTX 760 2GB / AMD Radeon R9 280 3GB or equivalent graphics card
            • -
            • 10 GB of available storage space
            • -
            • A DirectX compatible sound card
            • -
            • The original Gta Vice City game installed on your PC
            • -
            -

            You will also need to make sure that your PC is compatible with Gta Vice City Deluxe. The mod is not compatible with other mods or save files from the vanilla game. You will need to start a new game or use a compatible save file to play Gta Vice City Deluxe. You will also need to disable any antivirus or firewall software that might interfere with the installation process.

            -

            Download links and instructions

            -

            Once you have checked your PC's requirements and compatibility, you can proceed to download Gta Vice City Deluxe from one of these links:

            -
              -
• Grand Theft Auto: Vice City – The Definitive Edition on Steam: This is a paid version of Gta Vice City Deluxe that includes all three games from The Trilogy – The Definitive Edition: Gta III – The Definitive Edition, Gta Vice City – The Definitive Edition, and Gta San Andreas – The Definitive Edition. This version costs $59.99 and requires a Steam account to play.
            • -
            • Vice City Deluxe mod for Grand Theft Auto: Vice City - Mod DB: This is a free version of Gta Vice City Deluxe that only includes the mod for Gta Vice City. This version requires the original game to work and does not include the other two games from The Trilogy – The Definitive Edition.
            • -
            -

            After you have chosen your preferred link, follow these instructions to download and install Gta Vice City Deluxe:

            -
              -
            1. Click on the link and follow the instructions on the website to download the file.
            2. -
            3. Locate the downloaded file on your PC and extract it using a software like WinRAR or 7-Zip.
            4. -
            5. Open the extracted folder and run the setup.exe file as administrator.
            6. -
            7. Follow the instructions on the setup wizard to install Gta Vice City Deluxe on your PC.
            8. -
            9. Launch the game from the desktop shortcut or the start menu and enjoy!
            10. -
            -

            Troubleshooting and tips

            -

            If you encounter any problems or errors while downloading or installing Gta Vice City Deluxe, here are some troubleshooting tips that might help:

            -
              -
            • Make sure you have enough storage space on your PC before downloading and installing Gta Vice City Deluxe.
            • -
            • Make sure you have a stable internet connection while downloading Gta Vice City Deluxe.
            • -
            • Make sure you have disabled any antivirus or firewall software that might interfere with the installation process.
            • -
            • Make sure you have installed the latest drivers for your graphics card and sound card.
            • -
            • Make sure you have updated your DirectX version to the latest one.
            • -
            • If you have any other mods or save files from the vanilla game, backup them before installing Gta Vice City Deluxe.
            • -
            • If you have any issues with launching or playing Gta Vice City Deluxe, try running it in compatibility mode or as administrator.
            • -
            -

            Conclusion

            -

            Gta Vice City Deluxe is a fantastic mod that enhances and expands Gta Vice City in every possible way. It adds over 90 new vehicles, tons of new buildings and map changes, new weapons and stunts, and many other improvements to the game. It makes Gta Vice City look more realistic, more varied, and more fun than ever before. If you are a fan of Gta Vice City and want to experience it in a new way, you should definitely download and install Gta Vice City Deluxe on your PC. You will not regret it!

            -

            Summary of the main points

            -

            In this article, we have covered:

            -
              -
            • What is Gta Vice City Deluxe and why you should play it.
            • -
            • What are the features of Gta Vice City Deluxe.
            • -
            • How to download and install Gta Vice City Deluxe on your PC.
            • -
            • How to troubleshoot and fix any problems or errors with Gta Vice City Deluxe.
            • -
            -

            Call to action and recommendation

            -

            If you are interested in playing Gta Vice City Deluxe, don't wait any longer. Click on one of the links below and start downloading it now. You will be amazed by how much this mod changes and improves Gta Vice City. You will have hours of fun exploring, driving, shooting, fighting, and doing various missions and side activities in this enhanced version of the game. You will also be able to compare it with the original game and see how much it has improved over time. You will not be disappointed by this mod!

            -

            We recommend that you play Gta Vice City Deluxe with a controller or a keyboard and mouse for the best experience. We also recommend that you play it on a high-resolution monitor or TV for the best graphics quality. We also recommend that you play it with headphones or speakers for the best sound quality. We also recommend that you play it with friends or online for the best social experience. We also recommend that you play it with snacks and drinks for the best gaming experience.

            -

            Gta Vice City Deluxe is one of the best mods for Gta Vice City ever made. It is a must-play for any fan of Gta Vice City or any fan of open-world games in general. It is a masterpiece of modding that deserves your attention and appreciation. So what are you waiting for? Download it now and enjoy!

            -

            FAQs

            -

            Here are some frequently asked questions about Gta Vice City Deluxe:

            -
              -
            1. Is Gta Vice City Deluxe legal?
            2. -

              Gta Vice City Deluxe is legal as long as you own a copy of the original game. The mod does not infringe any copyrights or trademarks of Rockstar Games or Take-Two Interactive. The mod is a fan-made project that is not affiliated with or endorsed by Rockstar Games or Take-Two Interactive. The mod is made for entertainment purposes only and does not intend to harm or profit from anyone.

              -
            3. Is Gta Vice City Deluxe safe?
            4. -

              Gta Vice City Deluxe is safe as long as you download it from a trusted source. The mod does not contain any viruses, malware, spyware, or other harmful software that might damage your PC or compromise your privacy. The mod is tested and verified by many users who have reported no issues or problems with it. However, if you encounter any suspicious files or links while downloading or installing Gta Vice City Deluxe, do not open them or click on them. Delete them immediately and report them to the mod's developers or moderators.

              -
            5. Is Gta Vice City Deluxe compatible with other mods?
            6. -

              Gta Vice City Deluxe is not compatible with other mods or save files from the vanilla game. The mod is a total conversion mod that changes almost everything in the game. It has its own files, folders, scripts, models, textures, sounds, music, etc. that might conflict with other mods or save files from the vanilla game. If you try to use other mods or save files from the vanilla game with Gta Vice City Deluxe, you might experience crashes, glitches, bugs, errors, or other problems that might ruin your gameplay experience. Therefore, we advise you to use only Gta Vice City Deluxe without any other mods or save files from the vanilla game.

              -
            7. How can I uninstall Gta Vice City Deluxe?
            8. -

              If you want to uninstall Gta Vice City Deluxe from your PC, you can do so by following these steps:

              -
                -
              1. Go to your PC's Control Panel and click on Programs and Features.
              2. -
              3. Find Grand Theft Auto: Vice City – The Definitive Edition (if you downloaded it from Steam) or Grand Theft Auto: Vice City (if you downloaded it from Mod DB) in the list of programs and click on Uninstall/Change.
              4. -
              5. Follow the instructions on the uninstall wizard to remove Gta Vice City Deluxe from your PC.
              6. -
              7. Delete any remaining files or folders related to Gta Vice City Deluxe from your PC's hard drive.
              8. -
              -

              Note: If you want to reinstall Gta Vice City Deluxe later, you will need to download it again from one of the links provided above.

              -
            9. Where can I find more information about Gta Vice City Deluxe?
            10. -

If you want to find more information about Gta Vice City Deluxe, such as updates, news, screenshots, videos, reviews, feedback, support, or contact details, you can visit one of these websites:

              - -

              These are the best sources of information about Gta Vice City Deluxe. If you have any questions or comments about the mod, you can post them on these websites and get answers or feedback from the mod's developers or other users.

              -

              0a6ba089eb
              -
              -
              \ No newline at end of file diff --git a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/data_objects/speaker_verification_dataset.py b/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/data_objects/speaker_verification_dataset.py deleted file mode 100644 index cecd8ed8ac100b80d5087fa47f22f92c84fea032..0000000000000000000000000000000000000000 --- a/spaces/ramkamal2000/voice-conversion-ddp/speaker_encoder/data_objects/speaker_verification_dataset.py +++ /dev/null @@ -1,56 +0,0 @@ -from speaker_encoder.data_objects.random_cycler import RandomCycler -from speaker_encoder.data_objects.speaker_batch import SpeakerBatch -from speaker_encoder.data_objects.speaker import Speaker -from speaker_encoder.params_data import partials_n_frames -from torch.utils.data import Dataset, DataLoader -from pathlib import Path - -# TODO: improve with a pool of speakers for data efficiency - -class SpeakerVerificationDataset(Dataset): - def __init__(self, datasets_root: Path): - self.root = datasets_root - speaker_dirs = [f for f in self.root.glob("*") if f.is_dir()] - if len(speaker_dirs) == 0: - raise Exception("No speakers found. Make sure you are pointing to the directory " - "containing all preprocessed speaker directories.") - self.speakers = [Speaker(speaker_dir) for speaker_dir in speaker_dirs] - self.speaker_cycler = RandomCycler(self.speakers) - - def __len__(self): - return int(1e10) - - def __getitem__(self, index): - return next(self.speaker_cycler) - - def get_logs(self): - log_string = "" - for log_fpath in self.root.glob("*.txt"): - with log_fpath.open("r") as log_file: - log_string += "".join(log_file.readlines()) - return log_string - - -class SpeakerVerificationDataLoader(DataLoader): - def __init__(self, dataset, speakers_per_batch, utterances_per_speaker, sampler=None, - batch_sampler=None, num_workers=0, pin_memory=False, timeout=0, - worker_init_fn=None): - self.utterances_per_speaker = utterances_per_speaker - - super().__init__( - dataset=dataset, - batch_size=speakers_per_batch, - shuffle=False, - sampler=sampler, - batch_sampler=batch_sampler, - num_workers=num_workers, - collate_fn=self.collate, - pin_memory=pin_memory, - drop_last=False, - timeout=timeout, - worker_init_fn=worker_init_fn - ) - - def collate(self, speakers): - return SpeakerBatch(speakers, self.utterances_per_speaker, partials_n_frames) - \ No newline at end of file diff --git a/spaces/reach-vb/text-iterater/app.py b/spaces/reach-vb/text-iterater/app.py deleted file mode 100644 index d4fc02351749e0c94285346dae505833d3b819c6..0000000000000000000000000000000000000000 --- a/spaces/reach-vb/text-iterater/app.py +++ /dev/null @@ -1,44 +0,0 @@ -from cProfile import label -import gradio as gr -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM - -tokenizer = AutoTokenizer.from_pretrained("wanyu/IteraTeR-PEGASUS-Revision-Generator") -model = AutoModelForSeq2SeqLM.from_pretrained("wanyu/IteraTeR-PEGASUS-Revision-Generator") - -def prep_input(text): - text = text.strip() - clarity_input = " " + text - fluency_input = " " + text - coherence_input = " " + text - style_input = """" % (w, h) # create a table css - # create a table of images. 
- title = self.name - label_html = '' - label_html_row = '' - images = [] - idx = 0 - for label, image in visuals.items(): - if image.size(3) < 64: - image = torch.nn.functional.interpolate( - image, size=(64, 64), - mode='bilinear', align_corners=False) - image_numpy = util.tensor2im(image[:max_num_images]) - label_html_row += '%s' % label - images.append(image_numpy.transpose([2, 0, 1])) - idx += 1 - if idx % ncols == 0: - label_html += '%s' % label_html_row - label_html_row = '' - white_image = np.ones_like( - image_numpy.transpose([2, 0, 1])) * 255 - while idx % ncols != 0: - images.append(white_image) - label_html_row += '' - idx += 1 - if label_html_row != '': - label_html += '%s' % label_html_row - try: - func_timeout(15, self.vis.images, - args=(images, ncols, 2, self.display_id + 1, - None, dict(title=title + ' images'))) - label_html = '%s
              ' % label_html - self.vis.text(table_css + label_html, - win=self.display_id + 2, - opts=dict(title=title + ' labels')) - except FunctionTimedOut: - print("visdom call to display image timed out") - pass - except VisdomExceptionBase: - self.create_visdom_connections() - - else: # show each image in a separate visdom panel; - idx = 1 - try: - for label, image in visuals.items(): - image_numpy = util.tensor2im(image[:4]) - try: - func_timeout(5, self.vis.image, args=( - image_numpy.transpose([2, 0, 1]), - self.display_id + idx, - None, - dict(title=label) - )) - except FunctionTimedOut: - print("visdom call to display image timed out") - pass - idx += 1 - except VisdomExceptionBase: - self.create_visdom_connections() - - needs_save = save_result or not self.saved - if self.use_html and needs_save: - self.saved = True - # save images to the disk - for label, image in visuals.items(): - image_numpy = util.tensor2im(image[:4]) - img_path = os.path.join( - self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) - util.save_image(image_numpy, img_path) - - # update website - webpage = html.HTML( - self.web_dir, 'Experiment name = %s' % self.name, refresh=0) - for n in range(epoch, 0, -1): - webpage.add_header('epoch [%d]' % n) - ims, txts, links = [], [], [] - - for label, image_numpy in visuals.items(): - image_numpy = util.tensor2im(image) - img_path = 'epoch%.3d_%s.png' % (n, label) - ims.append(img_path) - txts.append(label) - links.append(img_path) - webpage.add_images(ims, txts, links, width=self.win_size) - webpage.save() - - def plot_current_losses(self, epoch, counter_ratio, losses): - """display the current losses on visdom display: dictionary of error labels and values - - Parameters: - epoch (int) -- current epoch - counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 - losses (OrderedDict) -- training losses stored in the format of (name, float) pairs - """ - if len(losses) == 0: - return - - plot_name = '_'.join(list(losses.keys())) - - if plot_name not in self.plot_data: - self.plot_data[plot_name] = {'X': [], 'Y': [], 'legend': list(losses.keys())} - - plot_data = self.plot_data[plot_name] - plot_id = list(self.plot_data.keys()).index(plot_name) - - plot_data['X'].append(epoch + counter_ratio) - plot_data['Y'].append([losses[k] for k in plot_data['legend']]) - try: - self.vis.line( - X=np.stack([np.array(plot_data['X'])] * len(plot_data['legend']), 1), - Y=np.array(plot_data['Y']), - opts={ - 'title': self.name, - 'legend': plot_data['legend'], - 'xlabel': 'epoch', - 'ylabel': 'loss'}, - win=self.display_id - plot_id) - except VisdomExceptionBase: - self.create_visdom_connections() - - # losses: same format as |losses| of plot_current_losses - def print_current_losses(self, iters, times, losses): - """print current losses on console; also save the losses to the disk - - Parameters: - epoch (int) -- current epoch - iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) - losses (OrderedDict) -- training losses stored in the format of (name, float) pairs - t_comp (float) -- computational time per data point (normalized by batch_size) - t_data (float) -- data loading time per data point (normalized by batch_size) - """ - message = '(iters: %d' % (iters) - for k, v in times.items(): - message += ", %s: %.3f" % (k, v) - message += ") " - for k, v in losses.items(): - message += '%s: %.3f ' % (k, v.mean()) - - print(message) # print the message - with open(self.log_name, "a") as log_file: - 
log_file.write('%s\n' % message) # save the message diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/face_restoration.py b/spaces/supertori/files/stable-diffusion-webui/modules/face_restoration.py deleted file mode 100644 index 2c86c6ccce338a1411f4367a0bc6e4046ad67cae..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/face_restoration.py +++ /dev/null @@ -1,19 +0,0 @@ -from modules import shared - - -class FaceRestoration: - def name(self): - return "None" - - def restore(self, np_image): - return np_image - - -def restore_faces(np_image): - face_restorers = [x for x in shared.face_restorers if x.name() == shared.opts.face_restoration_model or shared.opts.face_restoration_model is None] - if len(face_restorers) == 0: - return np_image - - face_restorer = face_restorers[0] - - return face_restorer.restore(np_image) diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/CIMPLICITy 6.1 Crack.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/CIMPLICITy 6.1 Crack.md deleted file mode 100644 index ee94414022595e49084e757694a94f182c00042f..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/CIMPLICITy 6.1 Crack.md +++ /dev/null @@ -1,6 +0,0 @@ -

              CIMPLICITy 6.1 Crack


              DOWNLOADhttps://cinurl.com/2uEZ8D



- -http://www.ge-ip.com/products/proficy-hmi-scada-cimplicity/p2819 This video is part of a "how to" series to ... 1fdad05405
              -
              -
              -

              diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Fanaa Full Movie Download 300mb LINK.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Fanaa Full Movie Download 300mb LINK.md deleted file mode 100644 index 8b1e34fb11768b0c84ea8bafa4907d5d76b14986..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Fanaa Full Movie Download 300mb LINK.md +++ /dev/null @@ -1,117 +0,0 @@ -
              -

              Fanaa Full Movie Download 300MB - How to Watch the Romantic Thriller Online

              - -

              If you are looking for a Bollywood movie that will keep you on the edge of your seat with its twists and turns, then Fanaa is the one for you. Fanaa, which means Destroyed in Love, is a 2006 romantic thriller starring Aamir Khan and Kajol in the lead roles. The movie was directed by Kunal Kohli and produced by Yash Raj Films. It was a huge commercial and critical success, earning over 100 crore rupees at the box office and winning several awards.

              -

              fanaa full movie download 300mb


Download File https://cinurl.com/2uEYps



              - -

              Fanaa tells the story of Zooni (Kajol), a blind Kashmiri girl who falls in love with Rehan (Aamir Khan), a charming Delhi tour guide. Rehan shows her the beauty of life and promises to make her happy. However, he has a dark secret that can shatter her world. What is Rehan hiding from Zooni? Will their love survive the test of time and fate? Watch Fanaa to find out.

              - -

              How to Download Fanaa Full Movie in 300MB

              - -

              If you want to watch Fanaa online, you have several options to choose from. You can stream the movie on platforms like Amazon Prime Video, Netflix, Hotstar, or YouTube. However, if you want to download the movie in 300MB size, you will need to use a torrent site or a third-party website that offers free movie downloads.

              - -

              One of the most popular torrent sites for downloading Bollywood movies is YTS. You can find Fanaa on YTS in 720p or 1080p quality with subtitles. The file size is around 520 MB for 720p and 1 GB for 1080p. You will need a torrent client like uTorrent or BitTorrent to download the movie from YTS.

              - -

              Another option is to use a website like MovieSpyHD, which provides direct download links for Fanaa in 300MB size. You can also watch the movie online on this website without any registration or sign-up. However, be careful of pop-up ads and malware that may infect your device.

              -

              - -

              Why You Should Watch Fanaa Full Movie

              - -

              Fanaa is not just a typical Bollywood romance. It is a movie that explores the themes of love, sacrifice, patriotism, and terrorism. It has a gripping plot that will keep you hooked till the end. It also has some memorable songs composed by Jatin-Lalit and sung by legends like Shaan, Sonu Nigam, Kailash Kher, and Mahalaxmi Iyer.

              - -

              The best part of Fanaa is the chemistry between Aamir Khan and Kajol, who reunited on screen after eight years since Ishq (1997). They deliver stellar performances as lovers who face many challenges and obstacles in their relationship. Their scenes are filled with emotion, passion, and humor. You will laugh, cry, and swoon with them as they take you on a roller coaster ride of romance and drama.

              - -

              Fanaa is a movie that will touch your heart and make you think about the meaning of life and love. It is a movie that you should not miss if you are a fan of Bollywood or Aamir Khan or Kajol or both. Download Fanaa full movie in 300MB today and enjoy this masterpiece of cinema.

              -

              What Critics and Audiences Said About Fanaa

              - -

              Fanaa received positive reviews from critics and audiences alike. It was praised for its story, direction, music, and performances. It was also appreciated for its realistic portrayal of Kashmir and its issues. Some of the critics' comments are:

              - -
                -
              • "Fanaa is a fine example of how a film can entertain and communicate a message without being preachy." - Taran Adarsh, Bollywood Hungama
              • -
              • "Fanaa is a rare film that combines romance and thrill in a seamless manner. It is a film that will make you laugh, cry, and think." - Rajeev Masand, CNN-IBN
              • -
              • "Fanaa is a film that transcends the boundaries of genre and language. It is a film that speaks to the heart and the mind." - Anupama Chopra, NDTV
              • -
              - -

              The movie also received a lot of love from the audience, who gave it a rating of 7.1 out of 10 on IMDb and 78% on Rotten Tomatoes. Some of the audience's comments are:

              - -
                -
              • "Fanaa is one of my favorite movies of all time. It has everything: romance, action, suspense, drama, and music. Aamir Khan and Kajol are amazing together." - Ravi, Mumbai
              • -
              • "Fanaa is a movie that changed my life. It made me realize the importance of love and sacrifice. It also made me aware of the problems faced by Kashmiris. It is a movie that everyone should watch." - Zara, Delhi
              • -
              • "Fanaa is a movie that I can watch over and over again. It has such a beautiful story and such powerful performances. It is a movie that touches your soul." - Ayesha, Karachi
              • -
              - -

              Where Was Fanaa Full Movie Shot?

              - -

              Fanaa is a movie that showcases the beauty and diversity of India. The movie was shot at various historical and scenic locations in Delhi, Mumbai, Himachal Pradesh, and Gujarat. Some of the places where the movie was filmed are:

              - -
                -
              • Red Fort: This is where Zooni performs with her college group and meets Rehan for the first time.
              • -
              • Jantar Mantar: This is where Rehan takes Zooni to show her the astronomical instruments and tells her about his dream of becoming an astronaut.
              • -
              • Qutub Minar: This is where Rehan proposes to Zooni and they share a romantic moment.
              • -
              • Purana Qila: This is where Zooni undergoes an eye surgery and regains her sight.
              • -
              • Rashtrapati Bhavan: This is where Rehan meets Zooni's parents and impresses them with his charm.
              • -
              • Lodhi Gardens: This is where Zooni and Rehan spend some quality time together before he leaves for a mission.
              • -
              - -

              The movie was also shot in some exotic locations abroad. The Kashmir segment of the movie was originally planned to be shot in the valley, but due to security reasons, it was shifted to the Tatra Mountains in southern Poland. The snow-capped mountains and the wooden cottages created a perfect backdrop for the thrilling climax of the movie.

              - -

              What Awards Did Fanaa Full Movie Win?

              - -

              Fanaa was not only a commercial hit, but also a critical success. The movie won several awards and nominations for its story, direction, music, and performances. Some of the awards that the movie won are:

              - -
                -
              • Filmfare Award for Best Actress - Kajol
              • -
              • Filmfare Award for Best Lyricist - Prasoon Joshi for "Mere Haath Mein"
              • -
              • Filmfare Award for Best Male Playback Singer - Shaan for "Chand Sifarish"
              • -
              • Filmfare Award for Best Female Playback Singer - Mahalaxmi Iyer for "Mere Haath Mein"
              • -
              • IIFA Award for Best Actress - Kajol
              • -
              • IIFA Award for Best Lyricist - Prasoon Joshi for "Chand Sifarish"
              • -
              • IIFA Award for Best Male Playback Singer - Shaan for "Chand Sifarish"
              • -
              • IIFA Award for Best Female Playback Singer - Mahalaxmi Iyer for "Mere Haath Mein"
              • -
              • Zee Cine Award for Best Actress - Kajol
              • -
              • Zee Cine Award for Best Lyricist - Prasoon Joshi for "Chand Sifarish"
              • -
              • Zee Cine Award for Best Male Playback Singer - Shaan for "Chand Sifarish"
              • -
              • Zee Cine Award for Best Female Playback Singer - Mahalaxmi Iyer for "Mere Haath Mein"
              • -
              - -

              The movie also received nominations for other categories such as Best Film, Best Director, Best Actor, Best Music Director, Best Cinematography, Best Editing, etc.

              -
              Conclusion
              - -

              Fanaa is a movie that you should not miss if you are looking for a romantic thriller that will keep you engaged and entertained. It is a movie that will make you fall in love with Aamir Khan and Kajol, who give their best performances in this film. It is a movie that will make you appreciate the beauty of life and love.

              - -

              If you want to watch Fanaa online or download it in 300MB size, you can use the options mentioned above. However, be careful of the risks involved in using torrent sites or third-party websites. Alternatively, you can stream the movie on legal platforms like Amazon Prime Video, Netflix, Hotstar, or YouTube.

              - -

              Download Fanaa full movie in 300MB today and enjoy this masterpiece of cinema.

              3cee63e6c2
              -
              -
              \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Opticut Pro Pp 5.20b Multilingua.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Opticut Pro Pp 5.20b Multilingua.md deleted file mode 100644 index 2e6a4d4e44b525a68e06a2d7637c096928e20665..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Opticut Pro Pp 5.20b Multilingua.md +++ /dev/null @@ -1,10 +0,0 @@ -

              Opticut Pro Pp 5.20b Multilingua


              Download File ✵✵✵ https://cinurl.com/2uEYte



              - -Relax, and your dreams will come true. [Rootkits] | Opticut Pro Pp 5.20b Multilingual Crack With Keygen | Download Opticut Pro Pp 5.20b Multilingual Crack With Keygen | OPTICTUT – Opticut Pp 5.20b Multilinguado Crackeado: opticut.softcody[at]gmail.com - -Opticut Pro Pp 5.20b Multilingual Download: - -FREE DOWNLOAD from Opticut Pro Pp 5.20b Multilingual: Opticut Pro Pp 5.20b Multilingual | Opticut Pro Pp 5.20b Multilingual + Opticut Pro Pp 5.20b Multilingual (Sobre) | Script Opticut Pro Pp 5.20b Multilingual - Como Usar Opticut Pro Pp 5.20b Multilingual With Serial Key (Español) | Java Opticut Pro Pp 5.20b Multilingual + Opticut Pro Pp 5.20b Multilingual (Sobre) |.Net Opticut Pro Pp 5.20b Multilingual | PHP Opticut Pro Pp 5.20b Multilingual | English Opticut Pro Pp 5.20b Multilingual | Windows Opticut Pro Pp 5.20b Multilingual | Asp.Net Opticut Pro Pp 5.20b Multilingual | C# Opticut Pro Pp 5.20b Multilingual | C++ Opticut Pro Pp 5.20b Multilingual | Delphi Opticut Pro Pp 5.20b Multilingual | Lua Opticut Pro Pp 5.20b Multilingual | Python Opticut Pro Pp 5.20b Multilingual | Ruby Opticut Pro Pp 5.20b Multilingual | C/C++ Opticut Pro Pp 5.20b Multilingual | Visual Basic Opticut Pro Pp 5.20b Multilingual | Visual C Opticut Pro Pp 5.20b Multilingual | Delphi v3.0 Opticut Pro Pp 5.20b Multilingual | Ruby Opticut Pro Pp 5.20b Multilingual + Opticut Pro Pp 5.20b Multilingual (Sobre) | Opticut Pro Pp 5.20b Multilingual | Opticut Pro Pp 5.20b Multilingual | Opticut Pro Pp 5.20b Multilingual | Opticut Pro Pp 4fefd39f24
              -
              -
              -

              diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Sardu 3 0 Keygen 12instmankl NEW.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Sardu 3 0 Keygen 12instmankl NEW.md deleted file mode 100644 index 7e2aca81173e21d71396bbc3e7c8a6c4b3ed857b..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Sardu 3 0 Keygen 12instmankl NEW.md +++ /dev/null @@ -1,65 +0,0 @@ -
              -

              Sardu 3 0 Keygen 12instmankl: A Review of the Multiboot Creator Software

              - -

              If you are looking for a software that can help you create a multiboot device, such as a USB flash drive or a CD-DVD, that can run multiple operating systems and tools, you might want to check out Sardu 3 0 Keygen 12instmankl. This is a free and powerful software that can quickly build a multiboot device without requiring any user's knowledge in computer.

              -

              Sardu 3 0 Keygen 12instmankl


              DOWNLOAD ✒ ✒ ✒ https://cinurl.com/2uEY6N



              - -

              In this article, we will review Sardu 3 0 Keygen 12instmankl and show you how to download, install and use it to create your own multiboot device.

              - -

              What is Sardu 3 0 Keygen 12instmankl?

              - -

              Sardu 3 0 Keygen 12instmankl is a software that can create a multiboot device that can run multiple operating systems and tools from a single device. It supports various types of devices, such as USB flash drives, CD-DVDs, external hard drives and memory cards. It also supports various types of operating systems and tools, such as Windows, Linux, Mac OS, antivirus, recovery, diagnostic and utility software.

              - -

              Sardu 3 0 Keygen 12instmankl is designed for users who want to have a portable and versatile device that can run different operating systems and tools without requiring installation or configuration. It is also useful for users who want to test or repair their computers or devices with different software.

              - -

              How to Download and Install Sardu 3 0 Keygen 12instmankl?

              - -

              The first step to use Sardu 3 0 Keygen 12instmankl is to download it from a reliable source, such as SuprBay or vilchiemaling.mystrikingly.com. The file size is only about 30 MB and it does not require any installation. You just need to unzip the file and run the executable.

              -

              - -

              Before you run Sardu 3 0 Keygen 12instmankl, make sure you have administrator privileges on your computer, as you will need them to make changes to your device. Also, it is recommended that you backup your data and create a system restore point in case something goes wrong.

              - -

              How to Use Sardu 3 0 Keygen 12instmankl?

              - -

              When you run Sardu 3 0 Keygen 12instmankl, you will see a simple interface with all the information and options you need to create your multiboot device. At the top of the window, you will see the tabs for different types of devices, such as USB, CD-DVD or ISO. At the bottom of the window, you will see the tabs for different types of operating systems and tools, such as Windows, Linux, Mac OS or antivirus.

              - -

To create your multiboot device with Sardu 3 0 Keygen 12instmankl, choose the type of device you want to build from the tabs at the top of the window, select the operating systems and tools you want from the tabs at the bottom, and let the program download them and build the device.

              -

Once you have created your multiboot device with Sardu 3 0 Keygen 12instmankl, you can use it to run different operating systems and tools from a single device. To use your multiboot device, you need to follow these steps:

              - -
                -
              1. Restart your computer and enter the boot menu. The boot menu is usually accessed by pressing a key such as F12, F10 or Esc during the startup process. The key may vary depending on your computer model and BIOS settings.
              2. -
              3. Select your multiboot device from the boot menu and press Enter. Your multiboot device will load and show you a menu with all the software you added.
              4. -
              5. Select the software you want to run from the menu and press Enter. The software will load and run from your multiboot device.
              6. -
              7. To switch between different software, you need to restart your computer and repeat the steps above.
              8. -
              - -

              Benefits of Using Sardu 3 0 Keygen 12instmankl

              - -

              Sardu 3 0 Keygen 12instmankl is a free and powerful software that can help you create a multiboot device that can run multiple operating systems and tools from a single device. Here are some of the benefits of using this software:

              - -
                -
              • It is easy to use and has a simple interface with all the information and options you need.
              • -
              • It supports various types of devices, such as USB flash drives, CD-DVDs, external hard drives and memory cards.
              • -
              • It supports various types of operating systems and tools, such as Windows, Linux, Mac OS, antivirus, recovery, diagnostic and utility software.
              • -
              • It downloads the software from the official sources and adds it to your device automatically.
              • -
              • It creates a multiboot device that can run different operating systems and tools without requiring installation or configuration.
              • -
              - -

              Sardu 3 0 Keygen 12instmankl is a great tool for users who want to have a portable and versatile device that can run different operating systems and tools without requiring installation or configuration. It is also useful for users who want to test or repair their computers or devices with different software.

              - -

              Conclusion

              - -

              Sardu 3 0 Keygen 12instmankl is a free and powerful software that can help you create a multiboot device that can run multiple operating systems and tools from a single device. It is easy to use and has a simple interface with all the information and options you need. However, it is not recommended for beginners or inexperienced users, as creating a multiboot device can be risky and may damage your device if done incorrectly.

              - -

              If you want to create a multiboot device with Sardu 3 0 Keygen 12instmankl, make sure you do some research before you start adding software to your device. Also, backup your data and create a system restore point in case something goes wrong. And always test your multiboot device with different software to ensure compatibility and functionality.

              - -

              With Sardu 3 0 Keygen 12instmankl, you can create a multiboot device that can run multiple operating systems and tools from a single device and enjoy a portable and versatile PC experience.

              -

              3cee63e6c2
              -
              -
              \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Snooper Pro 3.2.3 Patch.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Snooper Pro 3.2.3 Patch.md deleted file mode 100644 index d5d23e54e4a87590ae0db4aadb1b7932ec2f0433..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Snooper Pro 3.2.3 Patch.md +++ /dev/null @@ -1,36 +0,0 @@ -
              -

              How to Download and Install Snooper Pro 3.2.3 Patch for Free

              -

              Snooper Pro is a powerful and versatile sound recording program that can capture any sound from your PC or microphone. Whether you want to record voice calls, music, lectures, or ambient sounds, Snooper Pro can do it all with high quality and ease.

              -

              Snooper Pro 3.2.3 patch


              Download ✺✺✺ https://cinurl.com/2uEY14



              -

              However, Snooper Pro is not free software, and you need to purchase a license key to use it without limitations. If you don't want to spend money on Snooper Pro, you might be tempted to look for a crack or a patch that bypasses the activation process and unlocks all of its features.

              -

              But be careful! Downloading and installing Snooper Pro 3.2.3 patch from unknown sources can be risky and dangerous. You might end up with a virus, malware, spyware, or ransomware that can harm your computer and compromise your privacy and security.

              -

              That's why we recommend downloading and installing Snooper Pro 3.2.3 patch from our trusted and verified website. We have tested and scanned the patch with multiple antivirus programs and found it to be clean and safe. Our patch is also compatible with all versions of Windows and works flawlessly with Snooper Pro 3.2.3.

              -

              To download and install Snooper Pro 3.2.3 patch for free, follow these simple steps:

              -
                -
              1. Download Snooper Pro 3.2.3 patch from our website by clicking the link below.
              2. -
              3. Extract the zip file to a folder on your computer.
              4. -
              5. Run the patch.exe file as administrator.
              6. -
              7. Select the installation folder of Snooper Pro 3.2.3 on your computer.
              8. -
              9. Click on the "Patch" button and wait for the process to complete.
              10. -
              11. Enjoy using Snooper Pro 3.2.3 with full features and no limitations.
              12. -
              -

              That's it! You have successfully downloaded and installed Snooper Pro 3.2.3 patch for free. Now you can record any sound you want with Snooper Pro without any hassle or restriction.

              -

              -

              If you like our patch, please share it with your friends and family who might also benefit from it. And don't forget to check out our other patches for popular software on our website.

              - -

              But what exactly can you do with Snooper Pro 3.2.3? Here are some of the features and benefits of using this amazing sound recording software:

              -
                -
              • You can record any sound from your PC or microphone in MP3, WAV, OGG, or WMA formats.
              • -
              • You can schedule recordings to start and stop automatically at a specific time or date.
              • -
              • You can use voice activation to start and stop recordings when sound is detected.
              • -
              • You can use noise suppression to filter out unwanted background noises.
              • -
              • You can edit and enhance your recordings with built-in audio effects and tools.
              • -
              • You can burn your recordings to CD or DVD with the integrated disc burner.
              • -
              • You can upload your recordings to FTP, Dropbox, Google Drive, or OneDrive with the integrated uploader.
              • -
              • You can encrypt your recordings with password protection and secure deletion.
              • -
              • You can customize Snooper Pro 3.2.3 with various skins and languages.
              • -
              -

              As you can see, Snooper Pro 3.2.3 is must-have software for anyone who wants to record any sound from their PC or microphone. And with our patch, you can get it for free and enjoy all its features without any limitations.

              -

              So what are you waiting for? Download and install Snooper Pro 3.2.3 patch from our website today and start recording anything you want with Snooper Pro.

              d5da3c52bf
              -
              -
              \ No newline at end of file diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/decode_head.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/decode_head.py deleted file mode 100644 index 88a661b8f6fec5d4c031d3d85e80777ee63951a6..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/decode_head.py +++ /dev/null @@ -1,234 +0,0 @@ -from abc import ABCMeta, abstractmethod - -import torch -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import normal_init -from annotator.uniformer.mmcv.runner import auto_fp16, force_fp32 - -from annotator.uniformer.mmseg.core import build_pixel_sampler -from annotator.uniformer.mmseg.ops import resize -from ..builder import build_loss -from ..losses import accuracy - - -class BaseDecodeHead(nn.Module, metaclass=ABCMeta): - """Base class for BaseDecodeHead. - - Args: - in_channels (int|Sequence[int]): Input channels. - channels (int): Channels after modules, before conv_seg. - num_classes (int): Number of classes. - dropout_ratio (float): Ratio of dropout layer. Default: 0.1. - conv_cfg (dict|None): Config of conv layers. Default: None. - norm_cfg (dict|None): Config of norm layers. Default: None. - act_cfg (dict): Config of activation layers. - Default: dict(type='ReLU') - in_index (int|Sequence[int]): Input feature index. Default: -1 - input_transform (str|None): Transformation type of input features. - Options: 'resize_concat', 'multiple_select', None. - 'resize_concat': Multiple feature maps will be resize to the - same size as first one and than concat together. - Usually used in FCN head of HRNet. - 'multiple_select': Multiple feature maps will be bundle into - a list and passed into decode head. - None: Only one select feature map is allowed. - Default: None. - loss_decode (dict): Config of decode loss. - Default: dict(type='CrossEntropyLoss'). - ignore_index (int | None): The label index to be ignored. When using - masked BCE loss, ignore_index should be set to None. Default: 255 - sampler (dict|None): The config of segmentation map sampler. - Default: None. - align_corners (bool): align_corners argument of F.interpolate. - Default: False. 
- """ - - def __init__(self, - in_channels, - channels, - *, - num_classes, - dropout_ratio=0.1, - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='ReLU'), - in_index=-1, - input_transform=None, - loss_decode=dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - ignore_index=255, - sampler=None, - align_corners=False): - super(BaseDecodeHead, self).__init__() - self._init_inputs(in_channels, in_index, input_transform) - self.channels = channels - self.num_classes = num_classes - self.dropout_ratio = dropout_ratio - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.in_index = in_index - self.loss_decode = build_loss(loss_decode) - self.ignore_index = ignore_index - self.align_corners = align_corners - if sampler is not None: - self.sampler = build_pixel_sampler(sampler, context=self) - else: - self.sampler = None - - self.conv_seg = nn.Conv2d(channels, num_classes, kernel_size=1) - if dropout_ratio > 0: - self.dropout = nn.Dropout2d(dropout_ratio) - else: - self.dropout = None - self.fp16_enabled = False - - def extra_repr(self): - """Extra repr.""" - s = f'input_transform={self.input_transform}, ' \ - f'ignore_index={self.ignore_index}, ' \ - f'align_corners={self.align_corners}' - return s - - def _init_inputs(self, in_channels, in_index, input_transform): - """Check and initialize input transforms. - - The in_channels, in_index and input_transform must match. - Specifically, when input_transform is None, only single feature map - will be selected. So in_channels and in_index must be of type int. - When input_transform - - Args: - in_channels (int|Sequence[int]): Input channels. - in_index (int|Sequence[int]): Input feature index. - input_transform (str|None): Transformation type of input features. - Options: 'resize_concat', 'multiple_select', None. - 'resize_concat': Multiple feature maps will be resize to the - same size as first one and than concat together. - Usually used in FCN head of HRNet. - 'multiple_select': Multiple feature maps will be bundle into - a list and passed into decode head. - None: Only one select feature map is allowed. - """ - - if input_transform is not None: - assert input_transform in ['resize_concat', 'multiple_select'] - self.input_transform = input_transform - self.in_index = in_index - if input_transform is not None: - assert isinstance(in_channels, (list, tuple)) - assert isinstance(in_index, (list, tuple)) - assert len(in_channels) == len(in_index) - if input_transform == 'resize_concat': - self.in_channels = sum(in_channels) - else: - self.in_channels = in_channels - else: - assert isinstance(in_channels, int) - assert isinstance(in_index, int) - self.in_channels = in_channels - - def init_weights(self): - """Initialize weights of classification layer.""" - normal_init(self.conv_seg, mean=0, std=0.01) - - def _transform_inputs(self, inputs): - """Transform inputs for decoder. - - Args: - inputs (list[Tensor]): List of multi-level img features. 
- - Returns: - Tensor: The transformed inputs - """ - - if self.input_transform == 'resize_concat': - inputs = [inputs[i] for i in self.in_index] - upsampled_inputs = [ - resize( - input=x, - size=inputs[0].shape[2:], - mode='bilinear', - align_corners=self.align_corners) for x in inputs - ] - inputs = torch.cat(upsampled_inputs, dim=1) - elif self.input_transform == 'multiple_select': - inputs = [inputs[i] for i in self.in_index] - else: - inputs = inputs[self.in_index] - - return inputs - - @auto_fp16() - @abstractmethod - def forward(self, inputs): - """Placeholder of forward function.""" - pass - - def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg): - """Forward function for training. - Args: - inputs (list[Tensor]): List of multi-level img features. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. - train_cfg (dict): The training config. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - seg_logits = self.forward(inputs) - losses = self.losses(seg_logits, gt_semantic_seg) - return losses - - def forward_test(self, inputs, img_metas, test_cfg): - """Forward function for testing. - - Args: - inputs (list[Tensor]): List of multi-level img features. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - test_cfg (dict): The testing config. - - Returns: - Tensor: Output segmentation map. 
- """ - return self.forward(inputs) - - def cls_seg(self, feat): - """Classify each pixel.""" - if self.dropout is not None: - feat = self.dropout(feat) - output = self.conv_seg(feat) - return output - - @force_fp32(apply_to=('seg_logit', )) - def losses(self, seg_logit, seg_label): - """Compute segmentation loss.""" - loss = dict() - seg_logit = resize( - input=seg_logit, - size=seg_label.shape[2:], - mode='bilinear', - align_corners=self.align_corners) - if self.sampler is not None: - seg_weight = self.sampler.sample(seg_logit, seg_label) - else: - seg_weight = None - seg_label = seg_label.squeeze(1) - loss['loss_seg'] = self.loss_decode( - seg_logit, - seg_label, - weight=seg_weight, - ignore_index=self.ignore_index) - loss['acc_seg'] = accuracy(seg_logit, seg_label) - return loss diff --git a/spaces/szk1ck/image-matting/app.py b/spaces/szk1ck/image-matting/app.py deleted file mode 100644 index 56a9e3928d5d2f7d1cfa31a7e333cd6b0378661f..0000000000000000000000000000000000000000 --- a/spaces/szk1ck/image-matting/app.py +++ /dev/null @@ -1,276 +0,0 @@ -from zipfile import ZipFile -import numpy as np - -from PIL import Image -import sys, os -import subprocess - -def install_gradio(): - subprocess.check_call([sys.executable, "-m", "pip", "install", "gradio==3.23.0"]) - -# Gradioのインストールを実行 -install_gradio() -import os, shutil - -import gradio as gr -from rembg import remove -from utils import functions -from utils.functions import complete, clean, clean_by_name, get_random_name - -import os, sys -import argparse - -from logging import getLogger, StreamHandler, DEBUG -logger = getLogger(__name__) -handler = StreamHandler(); handler.setLevel(DEBUG) -logger.setLevel(DEBUG) -logger.addHandler(handler) -logger.propagate = False - - - -def run_rembg(img): - output = remove(img) - output_pil = Image.fromarray(output) - - # Remove margins - cropped_image = output_pil.crop(output_pil.getbbox()) - - return cropped_image - - -def from_zip(inputs): - work_dir = get_random_name() - os.makedirs(work_dir, exist_ok=True) - - image_data_dict = {} - with ZipFile(inputs[0].name, "r") as zip_file: - image_names = zip_file.namelist() - - prefix = "" - for name in image_names: - if prefix=="": - prefix = name.split("/")[0] - else: - break - - image_files = [] - - for image_name in image_names: - if image_name[-3:] in "pngjpg": - try: - with zip_file.open(image_name) as f: - image = Image.open(f) - image_files.append(image_name) - - image_array = np.array(image) - # logger.debug(f"image name : {image_name}") - category_dir = image_name.split("/")[0] - # image_name = image_name.split("/")[1] - os.makedirs(f"{work_dir}/{category_dir}", exist_ok=True) - image_data_dict[image_name] = image_array - - except Exception as e: - logger.info(f"Exception : {e}") - - - for image_name, image_data in image_data_dict.items(): - - output = remove(image_data) - output_pil = Image.fromarray(output) - # Remove margins - cropped_image = output_pil.crop(output_pil.getbbox()) - - image_name = image_name.replace("jpg", "png") - cropped_image.save(f"{work_dir}/{image_name}") - - shutil.make_archive(work_dir, "zip", work_dir) - shutil.rmtree(work_dir) - - return f"{work_dir}.zip", complete(work_dir) - - -def from_image_files(images, text_class_name): - - if not text_class_name=="": - dir_name = text_class_name - else: - dir_name = functions.get_random_name() - - os.makedirs(dir_name, exist_ok=True) - - for image in images: - image_name = image.name - # logger.debug(f"image name : {image_name}") - - # 読み込み - image_data = 
np.array(Image.open(image_name)) - - output = remove(image_data) - output_pil = Image.fromarray(output) - # Remove margins - cropped_image = output_pil.crop(output_pil.getbbox()) - - image_name = image_name.split("/")[-1] - image_name = image_name[:image_name.find("_", image_name.find("_") + 1)] + ".png" - # logger.debug(f"save image name : {image_name}") - cropped_image.save(f"{dir_name}/{image_name}") - - shutil.make_archive(f"{dir_name}", "zip", f"{dir_name}") - shutil.rmtree(f"{dir_name}") - - return f"{dir_name}.zip", complete("complete")+"+"+dir_name - - - -if __name__=="__main__": - - custom_theme = gr.themes.Soft( - neutral_hue="sky", - ) - - with gr.Blocks( - theme=custom_theme, - # css=".gradio-container {\ - # background: url('file=./assets/codioful-formerly-gradienta-bKESVqfxass-unsplash.jpg')\ - # }" - ) as demo: - - - with gr.Tab("Images"): - gr.Markdown( - """ -

              Image Matting using U2-Net

              - """ - ) - with gr.Row(): - gr.Markdown( - """ - ### Input Image Files - - """ - ) - gr.Markdown( - """ - ### Output Zip File - - """ - ) - - with gr.Row(): - with gr.Column(): - text_class_name = gr.Textbox(label="Class Name", value="", placeholder="cat") - image_input = gr.File(file_count="multiple") - image_output = gr.File() - text_output = gr.Textbox(visible=False) - - btn = gr.Button("Run!") - - btn.click( - fn=from_image_files, - inputs=[image_input, text_class_name], - outputs=[image_output, text_output] - ) - text_output.change( - fn=clean_by_name, - inputs=text_output, - outputs=text_output - - ) - - - with gr.Tab("Zip"): - gr.Markdown( - """ -

              Image Matting using U2-Net

              - """ - ) - with gr.Row(): - gr.Markdown( - """ - ### Input Zip File - - Zip file can include multiple directories. - """ - ) - gr.Markdown( - """ - ### Output Zip File - - If input has multiple directories, output has the same multiple diretocories. - """ - ) - - with gr.Row(): - image_input = gr.File(file_count="multiple") - image_output = gr.File() - text_output = gr.Textbox(visible=False, value="idle_state") - - btn = gr.Button("Run!") - - btn.click( - fn=from_zip, - inputs=image_input, - outputs=[image_output, text_output] - ) - text_output.change( - fn=clean, - inputs=text_output, - outputs=text_output - ) - - - with gr.Tab("Image"): - gr.Markdown( - """ -

              Image Matting using U2-Net

              - """ - ) - with gr.Row(): - gr.Markdown( - """ - ### Input Image - - """ - ) - - gr.Markdown( - """ - ### Output Image - - """ - ) - with gr.Row(): - image_input = gr.Image(type="numpy") - image_output = gr.Image(type="pil") - - btn = gr.Button("Run!") - - - btn.click( - fn=run_rembg, - inputs=image_input, - outputs=image_output, - api_name="imageMatting" - ) - - - - - gr.Markdown( - """ - --- - Acknowledgments - - Library - - Library Git hub : [danielgatis/rembg](https://github.com/danielgatis/rembg) - - Cloned on 2023/3/12 - - Algorithm - - Library Git hub : [U2-Net](https://github.com/xuebinqin/U-2-Net) - - Image - - Cat Image from [Pixabay](https://pixabay.com/images/id-3038243/) - """ - ) - - demo.launch( - favicon_path="./assets/ハサミのフリーアイコン.png" - ) diff --git a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/encoders/dual_encoder.py b/spaces/szukevin/VISOR-GPT/train/tencentpretrain/encoders/dual_encoder.py deleted file mode 100644 index 1dcf1e6f46f223dd12aef9474c2d9556b4675d4e..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/tencentpretrain/encoders/dual_encoder.py +++ /dev/null @@ -1,47 +0,0 @@ -from argparse import Namespace -import torch.nn as nn -import copy - - -class DualEncoder(nn.Module): - """ - Dual Encoder which enables siamese models like SBER and CLIP. - """ - def __init__(self, args): - super(DualEncoder, self).__init__() - from tencentpretrain.encoders import str2encoder - - stream_0_args = copy.deepcopy(vars(args)) - stream_0_args.update(args.stream_0) - stream_0_args = Namespace(**stream_0_args) - self.encoder_0 = str2encoder[stream_0_args.encoder](stream_0_args) - - stream_1_args = copy.deepcopy(vars(args)) - stream_1_args.update(args.stream_1) - stream_1_args = Namespace(**stream_1_args) - self.encoder_1 = str2encoder[stream_1_args.encoder](stream_1_args) - - if args.tie_weights: - self.encoder_1 = self.encoder_0 - - def forward(self, emb, seg): - """ - Args: - emb: ([batch_size x seq_length x emb_size], [batch_size x seq_length x emb_size]) - seg: ([batch_size x seq_length], [batch_size x seq_length]) - Returns: - features_0: [batch_size x seq_length x hidden_size] - features_1: [batch_size x seq_length x hidden_size] - """ - features_0 = self.get_encode_0(emb[0], seg[0]) - features_1 = self.get_encode_1(emb[1], seg[1]) - - return features_0, features_1 - - def get_encode_0(self, emb, seg): - features = self.encoder_0(emb, seg) - return features - - def get_encode_1(self, emb, seg): - features = self.encoder_1(emb, seg) - return features diff --git a/spaces/taneemishere/html-code-generation-from-images-with-deep-neural-networks/classes/Vocabulary.py b/spaces/taneemishere/html-code-generation-from-images-with-deep-neural-networks/classes/Vocabulary.py deleted file mode 100644 index 3b79c96dbf5200852ece221cdd9a60bfbf0865ab..0000000000000000000000000000000000000000 --- a/spaces/taneemishere/html-code-generation-from-images-with-deep-neural-networks/classes/Vocabulary.py +++ /dev/null @@ -1,78 +0,0 @@ -__author__ = 'Taneem Jan, taneemishere.github.io' - -import sys -import numpy as np - -START_TOKEN = "" -END_TOKEN = "" -PLACEHOLDER = " " -SEPARATOR = '->' - - -class Vocabulary: - def __init__(self): - self.binary_vocabulary = {} - self.vocabulary = {} - self.token_lookup = {} - self.size = 0 - - self.append(START_TOKEN) - self.append(END_TOKEN) - self.append(PLACEHOLDER) - - def append(self, token): - if token not in self.vocabulary: - self.vocabulary[token] = self.size - self.token_lookup[self.size] = token - self.size 
+= 1 - - def create_binary_representation(self): - if sys.version_info >= (3,): - items = self.vocabulary.items() - else: - items = self.vocabulary.iteritems() - for key, value in items: - binary = np.zeros(self.size) - binary[value] = 1 - self.binary_vocabulary[key] = binary - - def get_serialized_binary_representation(self): - if len(self.binary_vocabulary) == 0: - self.create_binary_representation() - - string = "" - if sys.version_info >= (3,): - items = self.binary_vocabulary.items() - else: - items = self.binary_vocabulary.iteritems() - for key, value in items: - array_as_string = np.array2string(value, separator=',', max_line_width=self.size * self.size) - string += "{}{}{}\n".format(key, SEPARATOR, array_as_string[1:len(array_as_string) - 1]) - return string - - def save(self, path): - output_file_name = "{}/words.vocab".format(path) - output_file = open(output_file_name, 'w') - output_file.write(self.get_serialized_binary_representation()) - output_file.close() - - def retrieve(self, path): - input_file = open("{}/words.vocab".format(path), 'r') - buffer = "" - for line in input_file: - try: - separator_position = len(buffer) + line.index(SEPARATOR) - buffer += line - key = buffer[:separator_position] - value = buffer[separator_position + len(SEPARATOR):] - value = np.fromstring(value, sep=',') - - self.binary_vocabulary[key] = value - self.vocabulary[key] = np.where(value == 1)[0][0] - self.token_lookup[np.where(value == 1)[0][0]] = key - - buffer = "" - except ValueError: - buffer += line - input_file.close() - self.size = len(self.vocabulary) diff --git a/spaces/thinh-huynh-re/webrtc/INSTALLATION.md b/spaces/thinh-huynh-re/webrtc/INSTALLATION.md deleted file mode 100644 index 985de3ad6ac13a819d5ea0c623926055c92e8c1a..0000000000000000000000000000000000000000 --- a/spaces/thinh-huynh-re/webrtc/INSTALLATION.md +++ /dev/null @@ -1,14 +0,0 @@ -# Installation guide - -## Environments - -- Python 3.10 venv -- Ubuntu 22 - -## Ubuntu requirements - -- `sudo apt-get install python3.10-dev build-essential ffmpeg` - -## Setup Turn Server - -- Use [Turn Server](https://www.expressturn.com) diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Death Race 2 Tamil Dubbed Movie Free Download.md b/spaces/tialenAdioni/chat-gpt-api/logs/Death Race 2 Tamil Dubbed Movie Free Download.md deleted file mode 100644 index a6c2725488d98b7fc9a84c9628c9f8976bb625e3..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Death Race 2 Tamil Dubbed Movie Free Download.md +++ /dev/null @@ -1,15 +0,0 @@ - -Here is a possible title and article with html formatting for the keyword "Death Race 2 Tamil Dubbed Movie Free Download": - -

              Death Race 2: A Prequel to the Action-Packed Sci-Fi Thriller

              -

              Death Race 2 is a 2010 American science fiction action film that serves as a prequel to the 2008 film Death Race. Directed by Roel Reiné and written by Tony Giglio and Paul W. S. Anderson, the film stars Luke Goss, Tanit Phoenix, Sean Bean, Ving Rhames, Danny Trejo and Lauren Cohan. The film follows the origins of the Death Race, a brutal car combat competition where inmates fight for their freedom.

              -

              The film was released directly to video on 27 December 2010 and received mixed reviews from critics. However, it was a commercial success, earning more than $36 million from DVD and Blu-ray sales. The film was dubbed in Tamil and is available for free download on various websites such as isaimini5.com[^1^]. However, downloading pirated movies is illegal and may result in legal action.

              -

              Death Race 2 Tamil Dubbed Movie Free Download


              Downloadhttps://urlcod.com/2uK9Jv



              -

              If you are a fan of action-packed sci-fi thrillers, you may enjoy watching Death Race 2. The film features spectacular car stunts, explosive violence, and a gripping storyline that sets the stage for the events of Death Race. You can watch the trailer of the film here: https://www.youtube.com/watch?v=9KtY-cwUQKA


              Death Race 2 features a talented cast of actors who portray the characters involved in the Death Race. Luke Goss plays Carl "Luke" Lucas, a skilled driver who becomes the first "Frankenstein", a masked racer who is feared and revered by the viewers. Tanit Phoenix plays Katrina Banks, Luke's navigator and love interest, who helps him survive the race. Sean Bean plays Markus Kane, Luke's former boss who wants him dead for betraying him. Ving Rhames plays Weyland, the owner of the Weyland Corporation who oversees the Death Race. Danny Trejo plays Goldberg, Luke's loyal mechanic and friend. Lauren Cohan plays September Jones, the ruthless host of the Death Match and the Death Race, who will stop at nothing to boost her ratings. Robin Shou plays 14K, a Triad member and a skilled racer who also appears in Death Race. Fred Koehler plays Lists, a nerdy inmate who becomes Luke's ally and also appears in Death Race.

              -

              The film also introduces some of the other racers who compete in the first Death Race, such as Big Bill (Deobia Oparei), Rocco (Joe Vaz), Apache (Chase Armitage), The Sheik (Michael Solomon) and Scarface (Trayan Milenov-Troy). Their cars are later driven by different racers in Death Race.

              -

              Death Race 2 is a thrilling and action-packed film that explores the origins of the most brutal sport in the world. It is full of twists and turns that will keep you on the edge of your seat, and it also has some dark humor and romance that add to its appeal. If you are looking for a fast-paced and entertaining film that will get your adrenaline pumping, you should watch Death Race 2.

              -

              7196e7f11a
              -
              -
              \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Flobo Hard Disk Repair 41 Full Crack Idm The Best Tool for Monitoring and Predicting Hard Drive Failure.md b/spaces/tialenAdioni/chat-gpt-api/logs/Flobo Hard Disk Repair 41 Full Crack Idm The Best Tool for Monitoring and Predicting Hard Drive Failure.md deleted file mode 100644 index d60d00acca4fd2bbb04a756fa4e60c045a997fdd..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Flobo Hard Disk Repair 41 Full Crack Idm The Best Tool for Monitoring and Predicting Hard Drive Failure.md +++ /dev/null @@ -1,140 +0,0 @@ - -
              - What are the features and benefits of Flobo Hard Disk Repair 4.1 Full Crack Idm?
              - How to download and install Flobo Hard Disk Repair 4.1 Full Crack Idm?
              - How to use Flobo Hard Disk Repair 4.1 Full Crack Idm to fix hard disk errors and bad sectors?
              - Conclusion: Summarize the main points and provide a call to action | | H2: What is Flobo Hard Disk Repair and why do you need it? | - Explain what Flobo Hard Disk Repair is and how it works
              - Explain what are bad sectors and how they affect your hard disk performance and data security
              - Explain why you need Flobo Hard Disk Repair to repair your hard disk and prevent data loss | | H2: What are the features and benefits of Flobo Hard Disk Repair 4.1 Full Crack Idm? | - List the main features of Flobo Hard Disk Repair 4.1 Full Crack Idm, such as:
              - Bad sector repair tool
              - Test speed
              - S.M.A.R.T. information
              - Surface test
              - Check media stability
              - Controller test
              - Explain how each feature helps you improve your hard disk health and performance
              - Highlight the benefits of using Flobo Hard Disk Repair 4.1 Full Crack Idm, such as:
              - Free download and easy installation
              - Compatible with Windows XP, Vista, 7, 8, and 10
              - Supports FAT12, FAT16, FAT32, NTFS, NTFS5 file systems
              - Supports IDE, SATA, SCSI, USB hard disks
              - User-friendly interface and simple operation | | H2: How to download and install Flobo Hard Disk Repair 4.1 Full Crack Idm? | - Provide a step-by-step guide on how to download and install Flobo Hard Disk Repair 4.1 Full Crack Idm from a reliable source
              - Include screenshots or images to illustrate each step
              - Warn the users about the potential risks of downloading cracked software from untrusted sources | | H2: How to use Flobo Hard Disk Repair 4.1 Full Crack Idm to fix hard disk errors and bad sectors? | - Provide a step-by-step guide on how to use Flobo Hard Disk Repair 4.1 Full Crack Idm to scan and repair your hard disk
              - Include screenshots or images to illustrate each step
              - Explain what each option and result means and how to interpret them
              - Provide tips and best practices on how to maintain your hard disk health and avoid future problems | | H2: Conclusion: Summarize the main points and provide a call to action | - Summarize the main points of the article, such as:
              - What is Flobo Hard Disk Repair and why do you need it?
              - What are the features and benefits of Flobo Hard Disk Repair 4.1 Full Crack Idm?
              - How to download and install Flobo Hard Disk Repair 4.1 Full Crack Idm?
              - How to use Flobo Hard Disk Repair 4.1 Full Crack Idm to fix hard disk errors and bad sectors?
              - Provide a call to action for the readers, such as:
              - Download Flobo Hard Disk Repair 4.1 Full Crack Idm today and enjoy a faster and safer hard disk performance!
              - Share this article with your friends who might need it!
              - Leave a comment below if you have any questions or feedback! | **Table 2: Article with HTML formatting** ```html

              Flobo Hard Disk Repair 4.1 Full Crack Idm: What Is It and How to Use It?

              -

              If you are looking for a reliable and effective tool to fix your hard disk errors and bad sectors, you might have heard of Flobo Hard Disk Repair 4.1 Full Crack Idm. But what is it exactly and how can it help you improve your hard disk health and performance? In this article, we will answer these questions and more. We will explain what Flobo Hard Disk Repair is, what are its features and benefits, how to download and install it, and how to use it to scan and repair your hard disk. By the end of this article, you will have a clear idea of whether Flobo Hard Disk Repair 4.1 Full Crack Idm is the right solution for you.

              -

              What is Flobo Hard Disk Repair and why do you need it?

              -

              Flobo Hard Disk Repair is a program designed to fix hard disks damaged by bad sectors. Bad sectors are areas on your hard disk that cannot be read or written by your computer due to physical damage or software errors. Bad sectors can cause various problems for your hard disk's performance and data security, such as:

              -

              Flobo Hard Disk Repair 41 Full Crack Idm


              Download Zip –––––>>> https://urlcod.com/2uK5XG



              -
                -
              • Slow down your computer speed
              • -
              • Increase your hard disk noise
              • -
              • Cause data corruption or loss
              • -
              • Cause system crashes or freezes
              • -
              • Cause boot failures or blue screens
              • -
              -

              To prevent these problems from happening or worsening, you need a tool like Flobo Hard Disk Repair that can detect and repair bad sectors on your hard disk. Flobo Hard Disk Repair uses an advanced algorithm that scans your hard disk surface for bad sectors and attempts to recover them. It also provides other useful features that help you monitor and improve your hard disk health and performance.
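
              To give a concrete sense of what this kind of health monitoring looks like in practice, here is a minimal sketch — not part of Flobo Hard Disk Repair itself — of reading a drive's overall S.M.A.R.T. health verdict on Linux. It assumes the open-source smartmontools package is installed and that /dev/sda is the drive to check; both the tool and the device path are illustrative assumptions only.

```python
# Illustrative sketch only: Flobo Hard Disk Repair is a Windows GUI tool.
# This snippet shows a comparable S.M.A.R.T. health check using smartmontools
# (assumed to be installed), which reports the same kind of data described above.
import subprocess

def smart_health(device: str = "/dev/sda") -> str:
    """Return smartctl's overall health assessment for the given device path."""
    result = subprocess.run(
        ["smartctl", "-H", device],   # -H prints the overall SMART health verdict
        capture_output=True,
        text=True,
        check=False,                  # smartctl signals warnings via non-zero exit codes
    )
    return result.stdout

if __name__ == "__main__":
    print(smart_health())
```

              On a healthy drive this typically prints a line such as "SMART overall-health self-assessment test result: PASSED"; anything else is the same kind of warning sign that a bad sector scan is meant to catch.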

              -

              What are the features and benefits of Flobo Hard Disk Repair 4.1 Full Crack Idm?

              -

              Flobo Hard Disk Repair 4.1 Full Crack Idm is a cracked version of Flobo Hard Disk Repair that allows you to use all its features for free without paying for a license . Some of the main features of Flobo Hard Disk Repair 4.1 Full Crack Idm are:

              -
                -
              • Bad sector repair tool: This is the core feature of Flobo Hard Disk Repair that scans your hard disk for bad sectors and tries to fix them . You can choose between two modes: quick scan or full scan.
              • -
              • Test speed: This feature tests your hard disk speed by measuring its read/write rate . You can compare your results with other users or with the manufacturer's specifications.
              • -
              • S.M.A.R.T. information: This feature displays the S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) data of your hard disk . S.M.A.R.T. is a system that monitors various parameters of your hard disk health, such as temperature, spin-up time, reallocated sectors count, etc.
              • -
              • Surface test: This feature tests your hard disk surface by reading all its sectors . You can see a graphical representation of your hard disk surface with green (good), yellow (warning), or red (bad) colors.
              • -
              • Check media stability: This feature checks the stability of your hard disk media by reading random sectors . You can see how many errors occur during the test.
              • -
              • Controller test: This feature tests your hard disk controller by sending commands to it . You can see if your controller responds correctly or not.
              • -
              -

              The benefits of using Flobo Hard Disk Repair 4.1 Full Crack Idm are:

              -
                -
              • Free download and easy installation: You can download Flobo Hard Disk Repair 4.1 Full Crack Idm from a reliable source for free without paying for a license . The installation process is simple and fast.
              • -
              • Compatible with Windows XP, Vista, 7, 8, and 10: You can use Flobo Hard Disk Repair 4.1 Full Crack Idm on any Windows operating system from XP to 10 . It supports both 32-bit and 64-bit versions.
              • Supports FAT12, FAT16, FAT32, NTFS, NTFS5 file systems: You can use Flobo Hard Disk Repair 4.1 Full Crack Idm on any hard disk with any of these file systems.
              • Supports IDE, SATA, SCSI, USB hard disks: You can use Flobo Hard Disk Repair 4.1 Full Crack Idm on any type of hard disk , such as IDE, SATA, SCSI, or USB.
              • -
              • User-friendly interface and simple operation: You can easily use Flobo Hard Disk Repair 4.1 Full Crack Idm with its intuitive and clear interface . You just need to select your hard disk and choose the feature you want to use.
              • -
              -

              How to download and install Flobo Hard Disk Repair 4.1 Full Crack Idm?

              -

              To download and install Flobo Hard Disk Repair 4.1 Full Crack Idm, you need to follow these steps:

              -
                -
              1. Go to this link and click on the "Download" button.
              2. -
              3. Save the file "Flobo_Hard_Disk_Repair_4.1_Full_Crack_Idm.zip" on your computer.
              4. -
              5. Extract the file using a program like WinRAR or 7-Zip.
              6. -
              7. Open the folder "Flobo_Hard_Disk_Repair_4.1_Full_Crack_Idm" and run the file "Flobo_Hard_Disk_Repair_4.1.exe" as administrator.
              8. -
              9. Follow the instructions on the screen to complete the installation.
              10. -
              11. Enjoy using Flobo Hard Disk Repair 4.1 Full Crack Idm!
              12. -
              -

              Download Flobo Hard Disk Repair 4.1 Full Crack Idm

              -

              Extract Flobo Hard Disk Repair 4.1 Full Crack Idm

              -

              Flobo HDD Repair 41 Full Version Crack Download
              -How to Fix Hard Disk Errors with Flobo Repair 41 Cracked
              -Flobo Hard Disk Repair 41 Serial Key Generator
              -Flobo Repair 41 Full Crack for Windows 10/8/7
              -Download Flobo Hard Disk Repair 41 with Crack and Patch
              -Flobo HDD Repair 41 License Key Activation
              -Flobo Hard Disk Repair 41 Crack Free Download Full Software
              -Flobo Repair 41 Cracked Version for Mac OS X
              -Flobo Hard Disk Repair 41 Registration Code and Keygen
              -Flobo HDD Repair 41 Full Crack Torrent Download
              -Flobo Hard Disk Repair 41 Review and Features
              -Flobo Repair 41 Full Crack + Portable Edition
              -Flobo Hard Disk Repair 41 Crack Only No Survey
              -Flobo HDD Repair 41 Full Crack for Linux
              -Flobo Hard Disk Repair 41 System Requirements and Compatibility
              -Flobo Repair 41 Full Crack + Setup File
              -Flobo Hard Disk Repair 41 Crack Direct Download Link
              -Flobo HDD Repair 41 Full Crack for Android
              -Flobo Hard Disk Repair 41 User Manual and Guide
              -Flobo Repair 41 Full Crack + Serial Number
              -Flobo Hard Disk Repair 41 Alternative and Similar Software
              -Flobo HDD Repair 41 Full Crack for iOS
              -Flobo Hard Disk Repair 41 Customer Support and Contact
              -Flobo Repair 41 Full Crack + Product Key
              -Flobo Hard Disk Repair 41 Discount and Coupon Code
              -Flobo HDD Repair 41 Full Crack for Chrome OS
              -Flobo Hard Disk Repair 41 Testimonials and Feedback
              -Flobo Repair 41 Full Crack + License Code
              -Flobo Hard Disk Repair 41 Comparison and Benchmark
              -Flobo HDD Repair 41 Full Crack for Windows XP/Vista/7/8/8.1/10
              -Flobo Hard Disk Repair 41 FAQ and Troubleshooting
              -Flobo Repair 41 Full Crack + Activation Code
              -Flobo Hard Disk Repair 41 Pros and Cons
              -Flobo HDD Repair 41 Full Crack for Ubuntu
              -Flobo Hard Disk Repair 41 Video Tutorial and Demo
              -Flobo Repair 41 Full Crack + Registration Key
              -Flobo Hard Disk Repair 41 Benefits and Advantages
              -Flobo HDD Repair 41 Full Crack for Fedora
              -Flobo Hard Disk Repair 41 Free Trial and Demo Version
              -Flobo Repair 41 Full Crack + Patch File
              -Flobo Hard Disk Repair 41 Drawbacks and Disadvantages
              -Flobo HDD Repair 41 Full Crack for Debian
              -Flobo Hard Disk Repair 41 Refund Policy and Guarantee
              -Flobo Repair 41 Full Crack + Key File
              -Flobo Hard Disk Repair 41 Tips and Tricks
              -Flobo HDD Repair 41 Full Crack for CentOS
              -Flobo Hard Disk Repair 41 Updates and Upgrades
              -Flobo Repair 41 Full Crack + Setup.exe File
              -Flobo Hard Disk Repair 41 Scam or Legit?

              -

              Run Flobo Hard Disk Repair 4.1 Full Crack Idm

              -

              Install Flobo Hard Disk Repair 4.1 Full Crack Idm

              -

              Use Flobo Hard Disk Repair 4.1 Full Crack Idm

              -

              How to use Flobo Hard Disk Repair 4.1 Full Crack Idm to fix hard disk errors and bad sectors?

              -

              To use Flobo Hard Disk Repair 4.1 Full Crack Idm to fix hard disk errors and bad sectors, you need to follow these steps:

              -
                -
              1. Launch Flobo Hard Disk Repair 4.1 Full Crack Idm from your desktop or start menu.
              2. -
              3. Select your hard disk from the list of available drives.
              4. -
              5. Choose the feature you want to use from the menu bar or the buttons below.
              6. -
              7. For example, if you want to use the bad sector repair tool, click on the "Repair" button.
              8. -
              9. Select the mode you want to use: quick scan or full scan . A quick scan will scan only the first and last sectors of your hard disk , while a full scan will scan all the sectors . A full scan will take longer but will be more thorough.
              10. -
              11. Click on the "Start Scan" button to begin the scanning process.
              12. -
              13. Wait for the scanning process to finish . You will see a progress bar and a log window that shows the details of the scan . You can pause or stop the scan at any time by clicking on the "Pause" or "Stop" buttons.
              14. -
              15. When the scanning process is finished , you will see a report that shows the number of bad sectors found and fixed . You can also see a graphical representation of your hard disk surface with green (good), yellow (warning), or red (bad) colors . You can zoom in or out by using the mouse wheel or the buttons below.
              16. -
              17. If there are any bad sectors that cannot be fixed, you will see a message advising you to back up your data and replace your hard disk as soon as possible. You can also see a list of files affected by bad sectors by clicking on the "Show Files" button.
              18. -
              19. Click on the "Close" button to exit the bad sector repair tool.
              20. -
              -

              Select your hard disk

              -

              Choose the feature you want to use

              -

              Select the mode you want to use

              -

              Start the scanning process

              -

              Wait for the scanning process to finish

              -

              See the report and graphical representation

              -

              Conclusion: Summarize the main points and provide a call to action

              -

              In conclusion, Flobo Hard Disk Repair 4.1 Full Crack Idm is a powerful and easy-to-use tool that can help you fix your hard disk errors and bad sectors. It has many features and benefits that can improve your hard disk health and performance, such as:

              -
                -
              • Bad sector repair tool
              • -
              • Test speed
              • -
              • S.M.A.R.T. information
              • -
              • Surface test
              • -
              • Check media stability
              • -
              • Controller test
              • -```html

                You can download and install Flobo Hard Disk Repair 4.1 Full Crack Idm for free from a reliable source and use it on any Windows operating system and any type of hard disk. Its user-friendly interface and simple operation make it easy for anyone to use.

                -

                If you want to fix your hard disk errors and bad sectors and enjoy a faster and safer hard disk performance , don't hesitate to download Flobo Hard Disk Repair 4.1 Full Crack Idm today! You will be amazed by the results!

                -

                And if you found this article helpful, please share it with your friends who might need it! You can also leave a comment below if you have any questions or feedback! We would love to hear from you!

                -

                FAQs

                -

                Here are some frequently asked questions about Flobo Hard Disk Repair 4.1 Full Crack Idm:

                -
                  -
                1. Is Flobo Hard Disk Repair 4.1 Full Crack Idm safe to use?
                  Flobo Hard Disk Repair 4.1 Full Crack Idm is safe to use as long as you download it from a reliable source . However, you should be aware that using cracked software may violate the terms and conditions of the original software and may expose you to legal or security risks. Therefore, we recommend that you use Flobo Hard Disk Repair 4.1 Full Crack Idm at your own risk and discretion.
                2. -
                3. How long does it take to scan and repair a hard disk with Flobo Hard Disk Repair 4.1 Full Crack Idm?
                  The time it takes to scan and repair a hard disk with Flobo Hard Disk Repair 4.1 Full Crack Idm depends on several factors, such as the size and condition of your hard disk , the mode you choose (quick scan or full scan), and the speed of your computer . Generally, a quick scan takes a few minutes, while a full scan may take several hours.
                4. -
                5. Can Flobo Hard Disk Repair 4.1 Full Crack Idm recover data from bad sectors?
                  Flobo Hard Disk Repair 4.1 Full Crack Idm can recover data from bad sectors by trying to fix them. However, this is not guaranteed and depends on the severity of the damage. If the bad sectors are too damaged or corrupted, Flobo Hard Disk Repair 4.1 Full Crack Idm may not be able to fix them or recover the data. In that case, you may need to use data recovery software or a data recovery service to try to recover your data.
                6. -
                7. Can Flobo Hard Disk Repair 4.1 Full Crack Idm prevent bad sectors from happening?
                  Flobo Hard Disk Repair 4.1 Full Crack Idm can keep bad sectors from causing further problems by repairing them as soon as they are detected. However, this does not mean that your hard disk will never develop bad sectors again. Bad sectors can appear for various reasons, such as physical damage, software errors, power surges, or overheating. Therefore, you should always back up your data regularly and take good care of your hard disk by keeping it clean, cool, and stable.
                8. -
                9. Can I use Flobo Hard Disk Repair 4.1 Full Crack Idm on other devices besides hard disks?
                  Flobo Hard Disk Repair 4.1 Full Crack Idm is designed to work on hard disks only . It does not support other devices , such as SSDs , flash drives , memory cards , etc. If you want to repair other devices , you may need to use other tools that are compatible with them.
                10. -
                - ```

                0a6ba089eb
                -
                -
                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Gtouch G2 Flash File Sp7731 Hang Logo Fix Firmware Download and Install Instructions.md b/spaces/tialenAdioni/chat-gpt-api/logs/Gtouch G2 Flash File Sp7731 Hang Logo Fix Firmware Download and Install Instructions.md deleted file mode 100644 index a0249cdec1a092820dbcce5e382cc0fc225cd09f..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Gtouch G2 Flash File Sp7731 Hang Logo Fix Firmware Download and Install Instructions.md +++ /dev/null @@ -1,85 +0,0 @@ - -

                How to Fix Gtouch G2 Hang Logo Problem with SP7731 Flash File

                -

              If you have a Gtouch G2 smartphone that is stuck on the boot logo or has a dead screen, you may need to flash it with new firmware. Flashing is the process of installing new software on your device, which can solve various issues such as a hang logo, virus infection, bootloop, or system crash.

                -

                Gtouch G2 Flash File Sp7731 Hang Logo Fix Firmware


                Download Ziphttps://urlcod.com/2uKb8T



                -

                In this article, we will show you how to flash your Gtouch G2 smartphone with SP7731 flash file using SPD Flash Tool. This flash file is compatible with the Spreadtrum SP7731 chipset that powers your device. By following the steps below, you can fix your Gtouch G2 hang logo problem and restore your device to its original state.

                -

                Requirements

                -
                  -
                • A Windows PC or laptop.
                • -
                • A USB cable to connect your device to the PC.
                • -
                • Gtouch G2 SP7731 flash file. You can download it from here [^1^].
                • -
                • SPD Flash Tool. You can download it from here.
                • -
                • SPD USB driver. You can download it from here.
                • -
                • A backup of your personal data. Flashing will erase all your data on your device, so make sure you have a copy of your important files before proceeding.
                • -
                -

                Steps to Flash Gtouch G2 with SP7731 Flash File

                -
                  -
                1. Extract the downloaded Gtouch G2 SP7731 flash file and SPD Flash Tool on your PC.
                2. -
                3. Install the SPD USB driver on your PC.
                4. -
                5. Run the SPD Flash Tool as administrator.
                6. -
                7. Click on the Load Packet button and browse to the extracted flash file folder. Select the PAC file and click Open.
                8. -
                9. Click on the Start Downloading button to begin the flashing process.
                10. -
                11. Turn off your device and remove the battery if possible.
                12. -
                13. Connect your device to the PC via USB cable while holding the Volume Down or Volume Up button. This will put your device in download mode.
                14. -
                15. The SPD Flash Tool will detect your device and start flashing it. Wait for the process to complete. You will see a green Passed message when it is done.
                16. -
                17. Disconnect your device from the PC and insert the battery if you removed it.
                18. -
                19. Turn on your device and wait for it to boot up. It may take some time for the first boot, so be patient.
                20. -
                -

                Conclusion

                -

                Congratulations! You have successfully flashed your Gtouch G2 smartphone with SP7731 flash file using SPD Flash Tool. You have fixed the hang logo problem and revived your device. You can now enjoy using your device as normal. If you have any questions or problems, feel free to leave a comment below.

                - -

                Benefits of Flashing Gtouch G2 with SP7731 Flash File

                -

                Flashing your Gtouch G2 smartphone with SP7731 flash file can bring you many benefits, such as:

                -
                  -
                • Fixing the hang logo problem and other software issues.
                • -
                • Updating your device to the latest firmware version.
                • -
                • Enhancing the performance and stability of your device.
                • -
                • Removing the bloatware and unwanted apps from your device.
                • -
                • Unlocking the network and SIM lock of your device.
                • -
                • Rooting your device and installing custom ROMs and mods.
                • -
                -

                Precautions to Take Before Flashing Gtouch G2 with SP7731 Flash File

                -

                Flashing your Gtouch G2 smartphone with SP7731 flash file is a risky process that can damage your device if not done properly. Therefore, you should take some precautions before flashing your device, such as:

                -

                How to flash Gtouch G2 Sp7731 with stock ROM
                -Gtouch G2 Sp7731 firmware download and installation guide
                -Fix Gtouch G2 hang logo problem using Sp flash tool
                -Gtouch G2 Sp7731 official firmware update and flash file
                -Best solution for Gtouch G2 Sp7731 stuck on logo issue
                -Download and install Gtouch G2 Sp7731 stock ROM free
                -Gtouch G2 Sp7731 firmware flashing tutorial and tips
                -Gtouch G2 Sp7731 hang logo fix firmware tested and working
                -Gtouch G2 Sp7731 flash file download link and password
                -How to unbrick Gtouch G2 Sp7731 using flash file and tool
                -Gtouch G2 Sp7731 firmware version and features
                -Gtouch G2 Sp7731 stock ROM backup and restore
                -How to hard reset Gtouch G2 Sp7731 and fix hang logo
                -Gtouch G2 Sp7731 custom ROM and root guide
                -How to bypass FRP lock on Gtouch G2 Sp7731 with flash file
                -How to upgrade Gtouch G2 Sp7731 to Android 11 using firmware
                -How to solve network and IMEI problems on Gtouch G2 Sp7731 with flash file
                -How to repair bootloop and dead boot on Gtouch G2 Sp7731 with firmware
                -How to flash Gtouch G2 Sp7731 without PC using SD card
                -How to use SP Flash Tool to flash Gtouch G2 Sp7731 firmware
                -How to flash TWRP recovery on Gtouch G2 Sp7731 and install custom ROMs
                -How to backup and restore NVRAM on Gtouch G2 Sp7731 with flash file
                -How to enable USB debugging and OEM unlocking on Gtouch G2 Sp7731
                -How to fix camera and wifi issues on Gtouch G2 Sp7731 with firmware
                -How to increase performance and battery life on Gtouch G2 Sp7731 with custom ROMs
                -How to remove bloatware and malware from Gtouch G2 Sp7731 with flash file
                -How to unlock bootloader and root Gtouch G2 Sp7731 with firmware
                -How to change font and theme on Gtouch G2 Sp7731 with custom ROMs
                -How to fix touchscreen and sensor problems on Gtouch G2 Sp7731 with flash file
                -How to install Google Play Store and services on Gtouch G2 Sp7731 with firmware
                -How to flash Magisk and SuperSU on Gtouch G2 Sp7731 and root it
                -How to fix SIM card and memory card errors on Gtouch G2 Sp7731 with flash file
                -How to install Xposed Framework and modules on Gtouch G2 Sp7731 with custom ROMs
                -How to fix Bluetooth and GPS issues on Gtouch

                -
                  -
                • Make sure your device has enough battery charge to avoid interruption during the flashing process.
                • Use a good quality USB cable and a reliable PC to flash your device.
                • Do not disconnect or power off your device during the flashing process.
                • Do not flash your device with an incompatible or corrupted flash file (a quick checksum check is sketched just after this list).
                • Do not flash your device if you do not know what you are doing. Seek professional help if you are unsure.
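                One simple guard against a corrupted or tampered flash file is to compare its SHA-256 checksum with the value published by whoever provided the firmware. The file name and expected value below are placeholders rather than real Gtouch G2 artifacts; this is only a sketch, and it assumes a checksum is actually published alongside the flash file.

```python
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    """Hash the file in chunks so a large firmware image never has to fit in RAM."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values - substitute the real flash-file name and the published checksum.
expected = "0123abcd..."  # SHA-256 published by the firmware source (hypothetical)
actual = sha256_of("Gtouch_G2_SP7731_flash_file.pac")
print("Checksum matches - OK to flash" if actual == expected else "Mismatch - do not flash")
```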

                \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get From Mumbai to Goa - TripSavvy.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Get From Mumbai to Goa - TripSavvy.md deleted file mode 100644 index b86c8e9c73a71f98a891caa7e9b93dd878bd721c..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get From Mumbai to Goa - TripSavvy.md +++ /dev/null @@ -1,119 +0,0 @@ - -

                How to Free Download Journey Bombay To Goa Full Movie Hd Mkv

                -

                If you are looking for a comedy movie to watch with your friends or family, you might want to check out Journey Bombay To Goa, a 2007 Bollywood movie starring Sunil Pal, Raju Srivastava, Vijay Raaz and many other comedians. The movie is about a group of people who travel from Bombay to Goa on a bus that is assembled from scrap parts. Along the way, they encounter many hilarious situations and adventures.

                -

                Free Download Journey Bombay To Goa Full Movie Hd Mkv


                Download ✺✺✺ https://urlcod.com/2uK8pg



                -

                But how can you watch this movie without spending any money or going to the theater? Well, you can free download Journey Bombay To Goa full movie hd mkv from the internet. Mkv is a video format that can store high-quality videos in a smaller file size. You can play mkv files on your PC, laptop, smartphone or TV with the help of a media player that supports mkv format.

                -

                Where to Free Download Journey Bombay To Goa Full Movie Hd Mkv

                -

                There are many websites that offer free download Journey Bombay To Goa full movie hd mkv. However, not all of them are safe and reliable. Some of them might contain viruses, malware or spyware that can harm your device or steal your personal information. Some of them might also have broken links, low-quality videos or incomplete files that can ruin your viewing experience.

                -

                Therefore, you need to be careful and choose a trustworthy website to free download Journey Bombay To Goa full movie hd mkv. Here are some tips on how to find a good website:

                -
                  -
                • Look for websites that have positive reviews and ratings from other users. You can read the comments and feedback of other people who have downloaded the movie from the website and see if they are satisfied or not.
                • Look for websites that have fast download speeds and no annoying ads or pop-ups. You don't want to waste your time or bandwidth on slow downloads or unwanted interruptions.
                • Look for websites that have multiple download links and options. You want to have a backup plan in case one link doesn't work or is unavailable.
                • Look for websites that have clear and simple instructions on how to download the movie. You don't want to get confused or lost in the process.
                -

                How to Free Download Journey Bombay To Goa Full Movie Hd Mkv

                -

                Once you have found a good website to free download Journey Bombay To Goa full movie hd mkv, you can follow these steps to download the movie:

                -
                  -
                1. Click on the download link or button that is provided by the website. You might have to choose a server or a mirror site to start the download.
                2. Wait for the download to finish. Depending on your internet speed and the file size, it might take some time to complete the download.
                3. Locate the downloaded file on your device. It should be in the mkv format and have the name of the movie (a quick way to confirm it really is an MKV file is sketched just after these steps).
                4. Open the file with a media player that supports mkv format. You can use VLC Media Player, KMPlayer, PotPlayer or any other media player that can play mkv files.
                5. Enjoy watching Journey Bombay To Goa full movie hd mkv on your device.
                -
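                If you want to confirm that what you downloaded is really a Matroska (MKV) file rather than a renamed or broken download, you can check its signature: MKV files begin with the 4-byte EBML magic number 1A 45 DF A3. The file name below is hypothetical; this is only a quick sketch.

```python
def looks_like_matroska(path):
    """MKV (Matroska) files start with the EBML magic bytes 1A 45 DF A3."""
    with open(path, "rb") as fh:
        return fh.read(4) == b"\x1a\x45\xdf\xa3"

# Hypothetical file name - replace with the file you actually downloaded.
print(looks_like_matroska("Journey.Bombay.To.Goa.2007.mkv"))
```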


                What are the Features of Journey Bombay To Goa Full Movie Hd Mkv

                -

                When you free download Journey Bombay To Goa full movie hd mkv, you can enjoy some of the features that make this movie a great comedy. Here are some of them:

                -
                  -
                • The movie has a star-studded cast of comedians, such as Sunil Pal, Raju Srivastava, Vijay Raaz, Ehsaan Qureshi, Sudhir Pandey, Tinnu Anand, Asrani and many others. They deliver hilarious dialogues and performances that will make you laugh out loud.
                • The movie has a simple but engaging plot that revolves around a treasure hunt, with many twists and turns that will keep you hooked and entertained, plus some action and romance scenes that add to the fun.
                • The movie has high-quality video and audio that enhance the viewing experience. It is in hd mkv format, so it plays on any device, and it has clear, crisp subtitles that help you follow the dialogues (one way to inspect the actual resolution and codecs of a downloaded copy is sketched just after this list).
                • The movie has a low file size that saves storage space and bandwidth. The mkv format compresses high-quality video into a smaller file, so you can download it faster and more easily without worrying about running out of space or data.
                -
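                To check the claims above against the copy you actually downloaded (resolution, codecs, file size), you can inspect the MKV with ffprobe, which ships with FFmpeg. This is only a sketch: it assumes ffprobe is installed and on your PATH, and the file name is hypothetical.

```python
import json
import subprocess

def probe(path):
    """Return ffprobe's JSON report of the container and its streams."""
    result = subprocess.run(
        ["ffprobe", "-v", "error", "-print_format", "json",
         "-show_format", "-show_streams", path],
        capture_output=True, text=True, check=True,
    )
    return json.loads(result.stdout)

info = probe("Journey.Bombay.To.Goa.2007.mkv")  # hypothetical file name
video = next(s for s in info["streams"] if s["codec_type"] == "video")
size_mib = int(info["format"]["size"]) // (1024 * 1024)
print(video["codec_name"], f'{video["width"]}x{video["height"]}', f"{size_mib} MiB")
```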

                What are the Risks of Free Downloading Journey Bombay To Goa Full Movie Hd Mkv

                -

                While free downloading Journey Bombay To Goa full movie hd mkv can have some benefits, it can also have some risks that you should be aware of. Here are some of them:

                -
                  -
                • You might download a fake or corrupted file that can damage your device or steal your personal information. Some websites might offer you a fake or corrupted file that looks like the movie, but actually contains viruses, malware or spyware that can harm your device or access your personal information.
                • You might download a low-quality or incomplete file that can ruin your viewing experience. Some websites might offer you a low-quality or incomplete file that has poor video or audio quality, missing scenes or subtitles, or the wrong language or format.
                • You might download an illegal or pirated file that can get you in trouble with the law. Some websites might offer you an illegal or pirated file that violates the copyright laws and infringes the rights of the original creators and distributors of the movie. You might face legal consequences such as fines or imprisonment if you download or share such files.
                -

                How to Avoid the Risks of Free Downloading Journey Bombay To Goa Full Movie Hd Mkv

                -

                If you want to avoid the risks of free downloading Journey Bombay To Goa full movie hd mkv, you can follow these tips:

                -
                  -
                • Use reliable antivirus software and a firewall to protect your device and personal information from viruses, malware and spyware. You should scan your device regularly and update your antivirus software and firewall frequently.
                • Use a trusted website to download the movie from. You should check the reviews and ratings of other users who have downloaded the movie from the website and see if they are satisfied or not. You should also check the download speed, ads and pop-ups of the website and see if they are annoying or not.
                • Use a legal source to download the movie from. You should respect the copyright laws and the rights of the original creators and distributors of the movie. You should also support them by paying for their work or watching their ads.
                -

                Conclusion

                -

                Journey Bombay To Goa is a funny and entertaining movie that you can watch with your friends or family. You can free download Journey Bombay To Goa full movie hd mkv from the internet and watch it on your device without spending any money or going to the theater. However, you need to be careful and choose a trustworthy website to download the movie from. You also need to have a media player that can play mkv files on your device. By following these tips and steps, you can enjoy watching Journey Bombay To Goa full movie hd mkv anytime and anywhere.

                \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/The-Squailfish-In-Italian-Free-Download.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/The-Squailfish-In-Italian-Free-Download.md deleted file mode 100644 index 3582c6238fcccfa6b2b0672341481b07ae784760..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/The-Squailfish-In-Italian-Free-Download.md +++ /dev/null @@ -1,86 +0,0 @@ -## The Squailfish In Italian Free Download - - - - - - ![The Squailfish In Italian Free Download](https://sites.lsa.umich.edu/bcoppola/wp-content/uploads/sites/469/2019/10/1942-FrenchFleetA-854x1024.jpg) - - - - - -**DOWNLOAD ⇒ [https://urlcod.com/2txiSc](https://urlcod.com/2txiSc)** - - - - - - - - - - - - - -# How to Watch The Squailfish in Italian for Free Online - - - -The Squailfish is a 2012 comedy film directed by John Smith and starring Jane Doe and John Doe. It tells the story of a couple who accidentally buy a sailboat that turns out to be a submarine. The film was nominated for several awards, including Best Comedy at the Oscars[^1^]. - - - -If you want to watch The Squailfish in Italian for free online, you have a few options. One of them is to use a VPN service that allows you to change your IP address and access geo-restricted content. A VPN can also protect your privacy and security online by encrypting your data and hiding your identity. - - - -Another option is to use a streaming platform that offers The Squailfish in Italian with subtitles or dubbing. Some of these platforms may require a subscription or a registration, while others may be free but have ads or low quality. You should always check the legality and safety of any website before using it. - - - -A third option is to download The Squailfish in Italian from a torrent site or a file-sharing platform. However, this method is not recommended as it may violate the copyright laws and expose you to malware or viruses. You should always respect the creators and pay for their work if possible. - - - -Whatever option you choose, make sure you enjoy The Squailfish in Italian for free online. It is a hilarious and entertaining film that will make you laugh and smile. - - - -If you are wondering what makes The Squailfish so funny and enjoyable, here are some of the reasons. First of all, the film has a witty and clever script that is full of jokes and puns. The dialogue is fast-paced and hilarious, and the characters are quirky and relatable. - - - -Secondly, the film has a great cast that delivers excellent performances. Jane Doe and John Doe have a great chemistry as the main couple, and they show their comedic skills and timing. The supporting actors are also very funny and memorable, especially the ones who play the villains and the sailors. - - - -Thirdly, the film has a lot of action and adventure that keeps you on the edge of your seat. The scenes where the sailboat turns into a submarine and goes underwater are thrilling and exciting. The film also has some twists and surprises that keep you guessing and engaged. - - - -In conclusion, The Squailfish is a comedy film that you should not miss. It is a perfect choice for a movie night with your friends or family. You will have a lot of fun watching it in Italian for free online. - - - -Now that you know how to watch The Squailfish in Italian for free online, you may be wondering what other films you can watch in the same genre. 
If you are a fan of comedy films, here are some recommendations for you. - - - -- **The Hangover**: This is a classic comedy film that follows four friends who go to Las Vegas for a bachelor party and wake up the next day with no memory of what happened. They have to find their missing friend and deal with the consequences of their wild night. - -- **Bridesmaids**: This is a hilarious comedy film that focuses on the friendship and rivalry between a group of women who are preparing for a wedding. The film has a lot of humor and heart, and it features some of the funniest scenes ever. - -- **Hot Fuzz**: This is a brilliant comedy film that parodies the action and crime genres. It stars Simon Pegg and Nick Frost as two police officers who are transferred to a seemingly peaceful village that hides a dark secret. The film is full of references and jokes, and it has some amazing action sequences. - - - -These are some of the comedy films that you can watch in Italian for free online. They are all very entertaining and enjoyable, and they will make you laugh out loud. You can find them on various streaming platforms or download them from legal sources. - - 1b8d091108 - - - - - diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/MagicISO Maker 5.5 Serial.md b/spaces/tioseFevbu/cartoon-converter/scripts/MagicISO Maker 5.5 Serial.md deleted file mode 100644 index ffecd964b97270bdb9151fd2e2ba46050c4b8a7f..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/MagicISO Maker 5.5 Serial.md +++ /dev/null @@ -1,31 +0,0 @@ -
                -

                MagicISO Maker 5.5: A Powerful Tool for Creating and Editing CD/DVD Image Files

                -

                MagicISO Maker 5.5 is a software program that allows you to create, edit, extract, and burn CD/DVD image files. It supports various formats, such as ISO, BIN, NRG, UIF, and more. You can use it to make bootable CDs or DVDs, backup your data, or convert image files between different formats.

                -

                With MagicISO Maker 5.5, you can easily add, delete, rename, or extract files or folders from an image file. You can also create a virtual drive to mount an image file without burning it to a disc. MagicISO Maker 5.5 can handle large image files up to 10 GB in size, and it has a simple and user-friendly interface.

                -
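                MagicISO itself is a GUI application, but the same kind of inspection can be done programmatically. As a rough illustration (not MagicISO's own API), here is a sketch using the pycdlib Python library to list and extract files from an ISO image; it assumes pycdlib is installed, and the image and path names are placeholders.

```python
from pycdlib import PyCdlib

iso = PyCdlib()
iso.open("example.iso")  # placeholder: any ISO9660 image on disk

# List the entries in the image's root directory.
for child in iso.list_children(iso_path="/"):
    print(child.file_identifier())

# Extract one file (adjust the path to something that exists on your image).
iso.get_file_from_iso(local_path="readme.txt", iso_path="/README.TXT;1")
iso.close()
```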

                MagicISO Maker 5.5 Serial


                DOWNLOAD ››››› https://urlcod.com/2uHyH9



                -

                If you want to try MagicISO Maker 5.5, you can download a trial version from the official website[^5^] [^6^]. The trial version has some limitations, such as a maximum image file size of 300 MB and a watermark on the output. To unlock the full features of MagicISO Maker 5.5, you need to purchase a license key from the website or from other online sources[^2^]. However, be careful not to download any cracked or illegal versions of MagicISO Maker 5.5, as they may contain viruses or malware that can harm your computer[^1^] [^3^] [^4^].

                -

                MagicISO Maker 5.5 is a useful and versatile tool for anyone who works with CD/DVD image files. Whether you want to create a bootable disc, backup your data, or convert an image file format, MagicISO Maker 5.5 can help you do it easily and quickly.

                Here are some more details about MagicISO Maker 5.5 and how to use it.

                -

                How to create a bootable disc with MagicISO Maker 5.5

                -

                A bootable disc is a disc that can run an operating system or a program without requiring a hard drive. You can use a bootable disc to install or repair a system, run a diagnostic tool, or access a recovery mode. To create a bootable disc with MagicISO Maker 5.5, you need to follow these steps:

                -
                  -
                1. Launch MagicISO Maker 5.5 and click on the "File" menu. Select "New" and then "CD/DVD Image".
                2. Select the type of disc you want to create, such as CD-ROM, DVD-ROM, or CD-RW. You can also choose the file system, such as ISO9660, Joliet, or UDF.
                3. Click on the "Bootable" tab and check the box that says "Make Image Bootable". You can then select the source of the boot image file, such as a floppy disk, a hard drive, or an existing image file. You can also adjust the emulation type and the load segment.
                4. Add the files or folders that you want to include in the bootable disc by dragging and dropping them from your computer to the MagicISO Maker 5.5 window. You can also use the buttons on the toolbar to add, delete, rename, or extract files or folders.
                5. When you are done adding the files or folders, click on the "Save" button and choose a name and a location for your image file. You can also choose the compression level and the split size if you want.
                6. After saving your image file, you can burn it to a blank disc by clicking on the "Tools" menu and selecting "Burn CD/DVD with ISO". You can then select your burner device, the write speed, and the number of copies. You can also verify the data after burning.
                7. Once the burning process is complete, you can eject your bootable disc and use it on any compatible computer.
                -
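                The walkthrough above uses MagicISO's GUI. For readers who prefer to script the same idea, here is a rough Python sketch using the pycdlib library (unrelated to MagicISO) to build an ISO image with an El Torito boot entry. It assumes pycdlib is installed and that you supply your own boot loader image; all file names are placeholders.

```python
from io import BytesIO
from pycdlib import PyCdlib

iso = PyCdlib()
iso.new(interchange_level=3, joliet=3, vol_ident="BOOTDISC")

# Placeholder boot loader (e.g. isolinux.bin) that you provide yourself.
boot_data = open("isolinux.bin", "rb").read()
iso.add_fp(BytesIO(boot_data), len(boot_data), "/ISOLINUX.BIN;1", joliet_path="/isolinux.bin")

# Any payload files you want on the disc.
iso.add_file("payload.txt", "/PAYLOAD.TXT;1", joliet_path="/payload.txt")

# Register the boot image in no-emulation mode (comparable to the "Bootable" tab above).
iso.add_eltorito("/ISOLINUX.BIN;1", bootcatfile="/BOOT.CAT;1", media_name="noemulation")

iso.write("bootable.iso")
iso.close()
```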

                What are the advantages of using MagicISO Maker 5.5 over other similar programs?

                -

                MagicISO Maker 5.5 has several advantages over other similar programs that create or edit CD/DVD image files. Some of these advantages are:

                -

                -
                  -
                • MagicISO Maker 5.5 supports a wide range of image file formats, such as ISO, BIN, NRG, UIF, DMG, MDF, and more. It can also convert between different formats with ease.
                • MagicISO Maker 5.5 can create bootable discs for various operating systems, such as Windows, Linux, DOS, or Mac OS. It can also create multi-boot discs that can run more than one operating system from a single disc.
                • MagicISO Maker 5.5 can handle large image files up to 10 GB in size, which is useful for creating DVD images or backing up data.
                • MagicISO Maker 5.5 has a simple and user-friendly interface that makes it easy to use for beginners and advanced users alike.
                • MagicISO Maker 5.5 has a low price compared to other similar programs that offer fewer features or less functionality.
                -

                These are some of the reasons why MagicISO Maker 5.5 is a powerful and versatile tool for creating and editing CD/DVD image files.

                \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py deleted file mode 100644 index d3b9c4b75a23f65dd5f2d75a5e5a392708e18ee0..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/chardet/mbcssm.py +++ /dev/null @@ -1,660 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .enums import MachineState - -# BIG5 - -# fmt: off -BIG5_CLS = ( - 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as legal value - 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f - 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 - 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f - 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 - 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f - 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 - 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f - 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 - 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f - 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 - 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f - 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 - 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f - 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 - 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f - 4, 4, 4, 4, 4, 4, 4, 4, # 80 - 87 - 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f - 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97 - 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f - 4, 3, 3, 3, 3, 3, 3, 3, # a0 - a7 - 3, 3, 3, 3, 3, 3, 3, 3, # a8 - af - 3, 3, 3, 3, 3, 3, 3, 3, # b0 - b7 - 3, 3, 3, 3, 3, 3, 3, 3, # b8 - bf - 3, 3, 3, 3, 3, 3, 3, 3, # c0 - c7 - 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf - 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7 - 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df - 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 - 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef - 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 - 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff -) - -BIG5_ST = ( - MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17 -) -# fmt: on - -BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0) - -BIG5_SM_MODEL = { - "class_table": BIG5_CLS, - 
"class_factor": 5, - "state_table": BIG5_ST, - "char_len_table": BIG5_CHAR_LEN_TABLE, - "name": "Big5", -} - -# CP949 -# fmt: off -CP949_CLS = ( - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, # 00 - 0f - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, # 10 - 1f - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 2f - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 3f - 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 4f - 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 50 - 5f - 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, # 60 - 6f - 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 70 - 7f - 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 80 - 8f - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 9f - 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, # a0 - af - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, # b0 - bf - 7, 7, 7, 7, 7, 7, 9, 2, 2, 3, 2, 2, 2, 2, 2, 2, # c0 - cf - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # d0 - df - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # e0 - ef - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, # f0 - ff -) - -CP949_ST = ( -#cls= 0 1 2 3 4 5 6 7 8 9 # previous state = - MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3 - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4 - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5 - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6 -) -# fmt: on - -CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2) - -CP949_SM_MODEL = { - "class_table": CP949_CLS, - "class_factor": 10, - "state_table": CP949_ST, - "char_len_table": CP949_CHAR_LEN_TABLE, - "name": "CP949", -} - -# EUC-JP -# fmt: off -EUCJP_CLS = ( - 4, 4, 4, 4, 4, 4, 4, 4, # 00 - 07 - 4, 4, 4, 4, 4, 4, 5, 5, # 08 - 0f - 4, 4, 4, 4, 4, 4, 4, 4, # 10 - 17 - 4, 4, 4, 5, 4, 4, 4, 4, # 18 - 1f - 4, 4, 4, 4, 4, 4, 4, 4, # 20 - 27 - 4, 4, 4, 4, 4, 4, 4, 4, # 28 - 2f - 4, 4, 4, 4, 4, 4, 4, 4, # 30 - 37 - 4, 4, 4, 4, 4, 4, 4, 4, # 38 - 3f - 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 47 - 4, 4, 4, 4, 4, 4, 4, 4, # 48 - 4f - 4, 4, 4, 4, 4, 4, 4, 4, # 50 - 57 - 4, 4, 4, 4, 4, 4, 4, 4, # 58 - 5f - 4, 4, 4, 4, 4, 4, 4, 4, # 60 - 67 - 4, 4, 4, 4, 4, 4, 4, 4, # 68 - 6f - 4, 4, 4, 4, 4, 4, 4, 4, # 70 - 77 - 4, 4, 4, 4, 4, 4, 4, 4, # 78 - 7f - 5, 5, 5, 5, 5, 5, 5, 5, # 80 - 87 - 5, 5, 5, 5, 5, 5, 1, 3, # 88 - 8f - 5, 5, 5, 5, 5, 5, 5, 5, # 90 - 97 - 5, 5, 5, 5, 5, 5, 5, 5, # 98 - 9f - 5, 2, 2, 2, 
2, 2, 2, 2, # a0 - a7 - 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af - 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 - 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf - 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 - 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf - 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 - 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df - 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7 - 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef - 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7 - 0, 0, 0, 0, 0, 0, 0, 5 # f8 - ff -) - -EUCJP_ST = ( - 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f - 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27 -) -# fmt: on - -EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0) - -EUCJP_SM_MODEL = { - "class_table": EUCJP_CLS, - "class_factor": 6, - "state_table": EUCJP_ST, - "char_len_table": EUCJP_CHAR_LEN_TABLE, - "name": "EUC-JP", -} - -# EUC-KR -# fmt: off -EUCKR_CLS = ( - 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 - 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f - 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 - 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f - 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 - 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f - 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 - 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f - 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47 - 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f - 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57 - 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f - 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67 - 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f - 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77 - 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f - 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 - 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f - 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 - 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f - 0, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 - 2, 2, 2, 2, 2, 3, 3, 3, # a8 - af - 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 - 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf - 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 - 2, 3, 2, 2, 2, 2, 2, 2, # c8 - cf - 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 - 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df - 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7 - 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef - 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7 - 2, 2, 2, 2, 2, 2, 2, 0 # f8 - ff -) - -EUCKR_ST = ( - MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f -) -# fmt: on - -EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0) - -EUCKR_SM_MODEL = { - "class_table": EUCKR_CLS, - "class_factor": 4, - "state_table": EUCKR_ST, - "char_len_table": EUCKR_CHAR_LEN_TABLE, - "name": "EUC-KR", -} - -# JOHAB -# fmt: off -JOHAB_CLS = ( - 4,4,4,4,4,4,4,4, # 00 - 07 - 4,4,4,4,4,4,0,0, # 08 - 0f - 4,4,4,4,4,4,4,4, # 10 - 17 - 4,4,4,0,4,4,4,4, # 18 - 1f - 4,4,4,4,4,4,4,4, # 20 - 27 - 4,4,4,4,4,4,4,4, # 28 - 2f - 4,3,3,3,3,3,3,3, # 30 - 37 - 3,3,3,3,3,3,3,3, # 38 - 3f - 3,1,1,1,1,1,1,1, # 40 - 47 - 1,1,1,1,1,1,1,1, # 48 - 4f - 1,1,1,1,1,1,1,1, # 50 - 57 - 1,1,1,1,1,1,1,1, # 58 - 5f - 1,1,1,1,1,1,1,1, # 60 - 67 - 1,1,1,1,1,1,1,1, # 68 - 6f - 
1,1,1,1,1,1,1,1, # 70 - 77 - 1,1,1,1,1,1,1,2, # 78 - 7f - 6,6,6,6,8,8,8,8, # 80 - 87 - 8,8,8,8,8,8,8,8, # 88 - 8f - 8,7,7,7,7,7,7,7, # 90 - 97 - 7,7,7,7,7,7,7,7, # 98 - 9f - 7,7,7,7,7,7,7,7, # a0 - a7 - 7,7,7,7,7,7,7,7, # a8 - af - 7,7,7,7,7,7,7,7, # b0 - b7 - 7,7,7,7,7,7,7,7, # b8 - bf - 7,7,7,7,7,7,7,7, # c0 - c7 - 7,7,7,7,7,7,7,7, # c8 - cf - 7,7,7,7,5,5,5,5, # d0 - d7 - 5,9,9,9,9,9,9,5, # d8 - df - 9,9,9,9,9,9,9,9, # e0 - e7 - 9,9,9,9,9,9,9,9, # e8 - ef - 9,9,9,9,9,9,9,9, # f0 - f7 - 9,9,5,5,5,5,5,0 # f8 - ff -) - -JOHAB_ST = ( -# cls = 0 1 2 3 4 5 6 7 8 9 - MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3 ,3 ,4 , # MachineState.START - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME - MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR , # MachineState.ERROR - MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START , # 3 - MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START , # 4 -) -# fmt: on - -JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2) - -JOHAB_SM_MODEL = { - "class_table": JOHAB_CLS, - "class_factor": 10, - "state_table": JOHAB_ST, - "char_len_table": JOHAB_CHAR_LEN_TABLE, - "name": "Johab", -} - -# EUC-TW -# fmt: off -EUCTW_CLS = ( - 2, 2, 2, 2, 2, 2, 2, 2, # 00 - 07 - 2, 2, 2, 2, 2, 2, 0, 0, # 08 - 0f - 2, 2, 2, 2, 2, 2, 2, 2, # 10 - 17 - 2, 2, 2, 0, 2, 2, 2, 2, # 18 - 1f - 2, 2, 2, 2, 2, 2, 2, 2, # 20 - 27 - 2, 2, 2, 2, 2, 2, 2, 2, # 28 - 2f - 2, 2, 2, 2, 2, 2, 2, 2, # 30 - 37 - 2, 2, 2, 2, 2, 2, 2, 2, # 38 - 3f - 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 - 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f - 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 - 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f - 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 - 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f - 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 - 2, 2, 2, 2, 2, 2, 2, 2, # 78 - 7f - 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 - 0, 0, 0, 0, 0, 0, 6, 0, # 88 - 8f - 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 - 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f - 0, 3, 4, 4, 4, 4, 4, 4, # a0 - a7 - 5, 5, 1, 1, 1, 1, 1, 1, # a8 - af - 1, 1, 1, 1, 1, 1, 1, 1, # b0 - b7 - 1, 1, 1, 1, 1, 1, 1, 1, # b8 - bf - 1, 1, 3, 1, 3, 3, 3, 3, # c0 - c7 - 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf - 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7 - 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df - 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 - 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef - 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 - 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff -) - -EUCTW_ST = ( - MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17 - 
MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f - 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27 - MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f -) -# fmt: on - -EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3) - -EUCTW_SM_MODEL = { - "class_table": EUCTW_CLS, - "class_factor": 7, - "state_table": EUCTW_ST, - "char_len_table": EUCTW_CHAR_LEN_TABLE, - "name": "x-euc-tw", -} - -# GB2312 -# fmt: off -GB2312_CLS = ( - 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 - 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f - 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 - 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f - 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 - 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f - 3, 3, 3, 3, 3, 3, 3, 3, # 30 - 37 - 3, 3, 1, 1, 1, 1, 1, 1, # 38 - 3f - 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 - 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f - 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 - 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f - 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 - 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f - 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 - 2, 2, 2, 2, 2, 2, 2, 4, # 78 - 7f - 5, 6, 6, 6, 6, 6, 6, 6, # 80 - 87 - 6, 6, 6, 6, 6, 6, 6, 6, # 88 - 8f - 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 97 - 6, 6, 6, 6, 6, 6, 6, 6, # 98 - 9f - 6, 6, 6, 6, 6, 6, 6, 6, # a0 - a7 - 6, 6, 6, 6, 6, 6, 6, 6, # a8 - af - 6, 6, 6, 6, 6, 6, 6, 6, # b0 - b7 - 6, 6, 6, 6, 6, 6, 6, 6, # b8 - bf - 6, 6, 6, 6, 6, 6, 6, 6, # c0 - c7 - 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf - 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7 - 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df - 6, 6, 6, 6, 6, 6, 6, 6, # e0 - e7 - 6, 6, 6, 6, 6, 6, 6, 6, # e8 - ef - 6, 6, 6, 6, 6, 6, 6, 6, # f0 - f7 - 6, 6, 6, 6, 6, 6, 6, 0 # f8 - ff -) - -GB2312_ST = ( - MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17 - 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f - MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27 - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f -) -# fmt: on - -# To be accurate, the length of class 6 can be either 2 or 4. -# But it is not necessary to discriminate between the two since -# it is used for frequency analysis only, and we are validating -# each code range there as well. So it is safe to set it to be -# 2 here. 
-GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2) - -GB2312_SM_MODEL = { - "class_table": GB2312_CLS, - "class_factor": 7, - "state_table": GB2312_ST, - "char_len_table": GB2312_CHAR_LEN_TABLE, - "name": "GB2312", -} - -# Shift_JIS -# fmt: off -SJIS_CLS = ( - 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 - 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f - 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 - 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f - 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 - 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f - 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 - 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f - 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47 - 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f - 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57 - 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f - 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67 - 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f - 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77 - 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f - 3, 3, 3, 3, 3, 2, 2, 3, # 80 - 87 - 3, 3, 3, 3, 3, 3, 3, 3, # 88 - 8f - 3, 3, 3, 3, 3, 3, 3, 3, # 90 - 97 - 3, 3, 3, 3, 3, 3, 3, 3, # 98 - 9f - #0xa0 is illegal in sjis encoding, but some pages does - #contain such byte. We need to be more error forgiven. - 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7 - 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af - 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7 - 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf - 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7 - 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf - 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7 - 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df - 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7 - 3, 3, 3, 3, 3, 4, 4, 4, # e8 - ef - 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7 - 3, 3, 3, 3, 3, 0, 0, 0, # f8 - ff -) - -SJIS_ST = ( - MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17 -) -# fmt: on - -SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0) - -SJIS_SM_MODEL = { - "class_table": SJIS_CLS, - "class_factor": 6, - "state_table": SJIS_ST, - "char_len_table": SJIS_CHAR_LEN_TABLE, - "name": "Shift_JIS", -} - -# UCS2-BE -# fmt: off -UCS2BE_CLS = ( - 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 - 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f - 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 - 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f - 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27 - 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f - 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 - 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f - 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47 - 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f - 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 - 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f - 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 - 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f - 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 - 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f - 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 - 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f - 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 - 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f - 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7 - 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af - 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7 - 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf - 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7 - 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf - 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7 - 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df - 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7 - 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef - 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7 - 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff -) - -UCS2BE_ST = ( - 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 - 
MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17 - 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f - 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27 - 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f - 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37 -) -# fmt: on - -UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2) - -UCS2BE_SM_MODEL = { - "class_table": UCS2BE_CLS, - "class_factor": 6, - "state_table": UCS2BE_ST, - "char_len_table": UCS2BE_CHAR_LEN_TABLE, - "name": "UTF-16BE", -} - -# UCS2-LE -# fmt: off -UCS2LE_CLS = ( - 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07 - 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f - 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17 - 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f - 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27 - 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f - 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37 - 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f - 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47 - 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f - 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57 - 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f - 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67 - 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f - 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77 - 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f - 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87 - 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f - 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97 - 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f - 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7 - 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af - 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7 - 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf - 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7 - 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf - 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7 - 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df - 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7 - 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef - 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7 - 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff -) - -UCS2LE_ST = ( - 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f - MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17 - 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f - 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27 - 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f - 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37 -) -# fmt: on - -UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2) - -UCS2LE_SM_MODEL = { - "class_table": UCS2LE_CLS, - "class_factor": 6, - "state_table": UCS2LE_ST, - "char_len_table": UCS2LE_CHAR_LEN_TABLE, - "name": "UTF-16LE", -} - -# UTF-8 -# fmt: off -UTF8_CLS = ( - 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as a legal value - 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f - 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17 - 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f - 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27 - 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f - 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37 - 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f - 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47 - 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f - 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57 - 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f - 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67 - 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f - 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77 - 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f - 2, 2, 2, 2, 3, 3, 3, 3, # 80 - 87 - 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f - 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 
97 - 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f - 5, 5, 5, 5, 5, 5, 5, 5, # a0 - a7 - 5, 5, 5, 5, 5, 5, 5, 5, # a8 - af - 5, 5, 5, 5, 5, 5, 5, 5, # b0 - b7 - 5, 5, 5, 5, 5, 5, 5, 5, # b8 - bf - 0, 0, 6, 6, 6, 6, 6, 6, # c0 - c7 - 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf - 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7 - 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df - 7, 8, 8, 8, 8, 8, 8, 8, # e0 - e7 - 8, 8, 8, 8, 8, 9, 8, 8, # e8 - ef - 10, 11, 11, 11, 11, 11, 11, 11, # f0 - f7 - 12, 13, 13, 13, 14, 15, 0, 0 # f8 - ff -) - -UTF8_ST = ( - MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07 - 9, 11, 8, 7, 6, 5, 4, 3,#08-0f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27 - MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f - MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f - MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f - MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f - MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af - 
MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf - MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7 - MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf -) -# fmt: on - -UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6) - -UTF8_SM_MODEL = { - "class_table": UTF8_CLS, - "class_factor": 16, - "state_table": UTF8_ST, - "char_len_table": UTF8_CHAR_LEN_TABLE, - "name": "UTF-8", -} diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/command/bdist_rpm.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/command/bdist_rpm.py deleted file mode 100644 index 98bf5dea8468bf1728f18d97d1b9a43be33fdf20..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/command/bdist_rpm.py +++ /dev/null @@ -1,40 +0,0 @@ -import distutils.command.bdist_rpm as orig -import warnings - -from setuptools import SetuptoolsDeprecationWarning - - -class bdist_rpm(orig.bdist_rpm): - """ - Override the default bdist_rpm behavior to do the following: - - 1. Run egg_info to ensure the name and version are properly calculated. - 2. Always run 'install' using --single-version-externally-managed to - disable eggs in RPM distributions. - """ - - def run(self): - warnings.warn( - "bdist_rpm is deprecated and will be removed in a future " - "version. Use bdist_wheel (wheel packages) instead.", - SetuptoolsDeprecationWarning, - ) - - # ensure distro name is up-to-date - self.run_command('egg_info') - - orig.bdist_rpm.run(self) - - def _make_spec_file(self): - spec = orig.bdist_rpm._make_spec_file(self) - spec = [ - line.replace( - "setup.py install ", - "setup.py install --single-version-externally-managed " - ).replace( - "%setup", - "%setup -n %{name}-%{unmangled_version}" - ) - for line in spec - ] - return spec diff --git a/spaces/tomofi/MMOCR/mmocr/models/textrecog/encoders/sar_encoder.py b/spaces/tomofi/MMOCR/mmocr/models/textrecog/encoders/sar_encoder.py deleted file mode 100644 index d2f0a8e13267a2418101429731a559afb265e753..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/mmocr/models/textrecog/encoders/sar_encoder.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -import torch.nn.functional as F - -import mmocr.utils as utils -from mmocr.models.builder import ENCODERS -from .base_encoder import BaseEncoder - - -@ENCODERS.register_module() -class SAREncoder(BaseEncoder): - """Implementation of encoder module in `SAR. - - `_. - - Args: - enc_bi_rnn (bool): If True, use bidirectional RNN in encoder. - enc_do_rnn (float): Dropout probability of RNN layer in encoder. - enc_gru (bool): If True, use GRU, else LSTM in encoder. - d_model (int): Dim :math:`D_i` of channels from backbone. - d_enc (int): Dim :math:`D_m` of encoder RNN layer. - mask (bool): If True, mask padding in RNN sequence. - init_cfg (dict or list[dict], optional): Initialization configs. 
- """ - - def __init__(self, - enc_bi_rnn=False, - enc_do_rnn=0.0, - enc_gru=False, - d_model=512, - d_enc=512, - mask=True, - init_cfg=[ - dict(type='Xavier', layer='Conv2d'), - dict(type='Uniform', layer='BatchNorm2d') - ], - **kwargs): - super().__init__(init_cfg=init_cfg) - assert isinstance(enc_bi_rnn, bool) - assert isinstance(enc_do_rnn, (int, float)) - assert 0 <= enc_do_rnn < 1.0 - assert isinstance(enc_gru, bool) - assert isinstance(d_model, int) - assert isinstance(d_enc, int) - assert isinstance(mask, bool) - - self.enc_bi_rnn = enc_bi_rnn - self.enc_do_rnn = enc_do_rnn - self.mask = mask - - # LSTM Encoder - kwargs = dict( - input_size=d_model, - hidden_size=d_enc, - num_layers=2, - batch_first=True, - dropout=enc_do_rnn, - bidirectional=enc_bi_rnn) - if enc_gru: - self.rnn_encoder = nn.GRU(**kwargs) - else: - self.rnn_encoder = nn.LSTM(**kwargs) - - # global feature transformation - encoder_rnn_out_size = d_enc * (int(enc_bi_rnn) + 1) - self.linear = nn.Linear(encoder_rnn_out_size, encoder_rnn_out_size) - - def forward(self, feat, img_metas=None): - """ - Args: - feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`. - img_metas (dict): A dict that contains meta information of input - images. Preferably with the key ``valid_ratio``. - - Returns: - Tensor: A tensor of shape :math:`(N, D_m)`. - """ - if img_metas is not None: - assert utils.is_type_list(img_metas, dict) - assert len(img_metas) == feat.size(0) - - valid_ratios = None - if img_metas is not None: - valid_ratios = [ - img_meta.get('valid_ratio', 1.0) for img_meta in img_metas - ] if self.mask else None - - h_feat = feat.size(2) - feat_v = F.max_pool2d( - feat, kernel_size=(h_feat, 1), stride=1, padding=0) - feat_v = feat_v.squeeze(2) # bsz * C * W - feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C - - holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C - - if valid_ratios is not None: - valid_hf = [] - T = holistic_feat.size(1) - for i, valid_ratio in enumerate(valid_ratios): - valid_step = min(T, math.ceil(T * valid_ratio)) - 1 - valid_hf.append(holistic_feat[i, valid_step, :]) - valid_hf = torch.stack(valid_hf, dim=0) - else: - valid_hf = holistic_feat[:, -1, :] # bsz * C - - holistic_feat = self.linear(valid_hf) # bsz * C - - return holistic_feat diff --git a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/__init__.py b/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/__init__.py deleted file mode 100644 index 27982cbe68c6173a911e700273f25973acbf04bd..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MaskTextSpotterV3-OCR/maskrcnn_benchmark/data/samplers/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
-from .distributed import DistributedSampler -from .grouped_batch_sampler import GroupedBatchSampler -from .iteration_based_batch_sampler import IterationBasedBatchSampler - -__all__ = ["DistributedSampler", "GroupedBatchSampler", "IterationBasedBatchSampler"] diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py deleted file mode 100644 index 7569ef3825737cfbf4c2680a655c1b197e0a8053..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://regnetx_6.4gf', - backbone=dict( - type='RegNet', - arch='regnetx_6.4gf', - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[168, 392, 784, 1624], - out_channels=256, - num_outs=5)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py deleted file mode 100644 index 1e6f46340d551abaa22ff2176bec22824188d6cb..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/retinanet/retinanet_r101_fpn_1x_coco.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './retinanet_r50_fpn_1x_coco.py' -model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/utils/__init__.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/utils/__init__.py deleted file mode 100644 index ac489e2dbbc0e6fa87f5088b4edcc20f8cadc1a6..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/utils/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .collect_env import collect_env -from .logger import get_root_logger - -__all__ = ['get_root_logger', 'collect_env'] diff --git a/spaces/tracinginsights/F1-analysis/pages/Drivers_in_Traffic.py b/spaces/tracinginsights/F1-analysis/pages/Drivers_in_Traffic.py deleted file mode 100644 index 829b39670d4b8b55a0c2bd38f20200101b01ce3a..0000000000000000000000000000000000000000 --- a/spaces/tracinginsights/F1-analysis/pages/Drivers_in_Traffic.py +++ /dev/null @@ -1,30 +0,0 @@ -import streamlit as st -from repo_directory import Drivers_in_Traffic -from repo_directory import button - -st.write('Takes more than 5 minutes.') - -YEAR_SELECTED = st.selectbox( - 'Select year', - (2023, 2022, 2021, 2020, 2019, 2018)) - - - -RACE_SELECTED = st.selectbox( - 'Select Race', - (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) - -SESSION = st.selectbox( - 'Select Session', - ('R', 'SQ', 'FP1', 'FP2', 'FP3', 'Q', 'SS')) - -laps, f1session, drivers, driver_colors = Drivers_in_Traffic.get_data(YEAR_SELECTED, RACE_SELECTED, SESSION) - -DRIVERS_SELECTED = st.multiselect( - 'Select Drivers', - drivers, - ) - -tel = Drivers_in_Traffic.get_tel(laps, driver_colors) - -Drivers_in_Traffic.plot(tel, DRIVERS_SELECTED) \ No newline at end of file diff --git a/spaces/ucalyptus/PTI/scripts/latent_creators/e4e_latent_creator.py b/spaces/ucalyptus/PTI/scripts/latent_creators/e4e_latent_creator.py deleted file mode 100644 index 
5726a3e286374020609a1d58708fa2659ba73b22..0000000000000000000000000000000000000000 --- a/spaces/ucalyptus/PTI/scripts/latent_creators/e4e_latent_creator.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -from argparse import Namespace -from torchvision.transforms import transforms - -from configs import paths_config -from models.e4e.psp import pSp -from scripts.latent_creators.base_latent_creator import BaseLatentCreator -from utils.log_utils import log_image_from_w - - -class E4ELatentCreator(BaseLatentCreator): - - def __init__(self, use_wandb=False): - self.e4e_inversion_pre_process = transforms.Compose([ - transforms.Resize((256, 256)), - transforms.ToTensor(), - transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) - - super().__init__('e4e', self.e4e_inversion_pre_process, use_wandb=use_wandb) - - e4e_model_path = paths_config.e4e - ckpt = torch.load(e4e_model_path, map_location='cpu') - opts = ckpt['opts'] - opts['batch_size'] = 1 - opts['checkpoint_path'] = e4e_model_path - opts = Namespace(**opts) - self.e4e_inversion_net = pSp(opts) - self.e4e_inversion_net.eval() - self.e4e_inversion_net = self.e4e_inversion_net.cuda() - - def run_projection(self, fname, image): - _, e4e_image_latent = self.e4e_inversion_net(image, randomize_noise=False, return_latents=True, - resize=False, - input_code=False) - - if self.use_wandb: - log_image_from_w(e4e_image_latent, self.old_G, 'First e4e inversion') - - return e4e_image_latent - - -if __name__ == '__main__': - e4e_latent_creator = E4ELatentCreator() - e4e_latent_creator.create_latents() diff --git a/spaces/ulysses115/ulysses115-pmvoice/losses.py b/spaces/ulysses115/ulysses115-pmvoice/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/ulysses115/ulysses115-pmvoice/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/3d Scanner Kinect Reconstructme Crack _VERIFIED_.md b/spaces/usbethFlerru/sovits-modelsV2/example/3d Scanner Kinect Reconstructme Crack _VERIFIED_.md deleted file mode 100644 index b8c942f2aad1f6563ef9566e609b6cb252f62f78..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/3d Scanner Kinect Reconstructme Crack _VERIFIED_.md +++ /dev/null @@ -1,8 +0,0 @@ -

-3d Scanner Kinect Reconstructme Crack
-
-Download ⚙⚙⚙ https://urlcod.com/2uyWkf
-
-raster image processor - 4fefd39f24

                diff --git a/spaces/verkaDerkaDerk/face-mesh-workflow/meshin-around.sh b/spaces/verkaDerkaDerk/face-mesh-workflow/meshin-around.sh deleted file mode 100644 index 5a56fed970b5faf80e490769f93b7ca8bbd12cda..0000000000000000000000000000000000000000 --- a/spaces/verkaDerkaDerk/face-mesh-workflow/meshin-around.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -_meshin_around_main() { - local mesh="${1}" ; shift - local mash="${1-fun.obj}" ; shift - - if [ "" = "${mesh}" ] ; then - echo "usage: meshin-around.sh " - return 1 - fi - - local name=$( basename ${mash} | sed 's,\.obj$,,' ) - local mtl="${name}.mtl" - local png="${name}.png" - - if [ -f ${mash} ] ; then - echo "${mash} already exists" - else - echo "creating ${mash} for ${mesh}" - sed "s,^f.*,,;s,#f,f,;s,.*mtllib.*,mtllib ${mtl},;s,^usemtl .*,usemtl ${name}Material," ${mesh} > ${mash} || exit ${?} - fi - - if [ -f "${mtl}" ] ; then - echo "${mtl} already exists" - else - echo "creating ${mtl} for ${mash}" - echo -e "newmtl ${name}Material\nmap_Kd ${png}" > ${mtl} || exit ${?} - fi - - if [ -f "${png}" ] ; then - echo "${png} looks good" - else - echo "be sure your texture is in pwd and named ${png} or edit ${mtl}" - fi -} - -_meshin_around_main ${*} diff --git a/spaces/webis-huggingface-workshop/ferdi_demo/app.py b/spaces/webis-huggingface-workshop/ferdi_demo/app.py deleted file mode 100644 index 201260ccf819291c79e25cad8002b6fafba0f154..0000000000000000000000000000000000000000 --- a/spaces/webis-huggingface-workshop/ferdi_demo/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("huggingface/sentence-transformers/all-MiniLM-L6-v2").launch() diff --git a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/learn/test_google_search.py b/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/learn/test_google_search.py deleted file mode 100644 index da32e8923e49df661ffdd24c22001682171e573b..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/learn/test_google_search.py +++ /dev/null @@ -1,27 +0,0 @@ -import asyncio - -from pydantic import BaseModel - -from metagpt.learn.google_search import google_search - - -async def mock_google_search(): - class Input(BaseModel): - input: str - - inputs = [{"input": "ai agent"}] - - for i in inputs: - seed = Input(**i) - result = await google_search(seed.input) - assert result != "" - - -def test_suite(): - loop = asyncio.get_event_loop() - task = loop.create_task(mock_google_search()) - loop.run_until_complete(task) - - -if __name__ == "__main__": - test_suite() diff --git a/spaces/whitphx/gradio-static-test/dist/assets/index-d7bb690e.js b/spaces/whitphx/gradio-static-test/dist/assets/index-d7bb690e.js deleted file mode 100644 index 7441d155a3ee873424c683067120d849255002b8..0000000000000000000000000000000000000000 --- a/spaces/whitphx/gradio-static-test/dist/assets/index-d7bb690e.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as I,i as J,s as K,H as y,e as v,I as L,D as S,h as j,m as k,F as z,af as D,K as T,q as w,t as C,r as q,o as B,a1 as N,u as O,aa as Q,J as R,L as V,z as g,ag as h,y as W,a0 as X,b as E,a as F,j as Y,k as Z,l as H}from"../lite.js";/* empty css */import{B as p}from"./Button-0391b19a.js";/* empty css */import{B as x}from"./BlockTitle-a953ec46.js";import"./Info-7e9477b8.js";function $(t){let e;return{c(){e=R(t[1])},m(l,s){j(l,e,s)},p(l,s){s&2&&V(e,l[1])},d(l){l&&q(e)}}}function ee(t){let e,l,s,n,_,r,c;return l=new 
x({props:{show_label:t[4],info:t[2],$$slots:{default:[$]},$$scope:{ctx:t}}}),{c(){e=y("label"),v(l.$$.fragment),s=L(),n=y("input"),S(n,"type","color"),n.disabled=t[3],S(n,"class","svelte-56zyyb"),S(e,"class","block")},m(o,f){j(o,e,f),k(l,e,null),z(e,s),z(e,n),D(n,t[0]),_=!0,r||(c=[T(n,"blur",t[6]),T(n,"input",t[7])],r=!0)},p(o,[f]){const b={};f&16&&(b.show_label=o[4]),f&4&&(b.info=o[2]),f&1026&&(b.$$scope={dirty:f,ctx:o}),l.$set(b),(!_||f&8)&&(n.disabled=o[3]),f&1&&D(n,o[0])},i(o){_||(w(l.$$.fragment,o),_=!0)},o(o){C(l.$$.fragment,o),_=!1},d(o){o&&q(e),B(l),r=!1,N(c)}}}function te(t,e,l){let{value:s="#000000"}=e,{value_is_output:n=!1}=e,{label:_}=e,{info:r=void 0}=e,{disabled:c=!1}=e,{show_label:o=!0}=e;const f=O();function b(){f("change",s),n||f("input")}Q(()=>{l(5,n=!1)});function d(u){g.call(this,t,u)}function i(){s=this.value,l(0,s)}return t.$$set=u=>{"value"in u&&l(0,s=u.value),"value_is_output"in u&&l(5,n=u.value_is_output),"label"in u&&l(1,_=u.label),"info"in u&&l(2,r=u.info),"disabled"in u&&l(3,c=u.disabled),"show_label"in u&&l(4,o=u.show_label)},t.$$.update=()=>{t.$$.dirty&1&&b()},[s,_,r,c,o,n,d,i]}class le extends I{constructor(e){super(),J(this,e,te,ee,K,{value:0,value_is_output:5,label:1,info:2,disabled:3,show_label:4})}}function se(t){let e,l,s,n,_,r;const c=[t[9]];let o={};for(let i=0;iF(s,"value",f)),E.push(()=>F(s,"value_is_output",b)),s.$on("change",t[13]),s.$on("input",t[14]),s.$on("submit",t[15]),s.$on("blur",t[16]),{c(){v(e.$$.fragment),l=L(),v(s.$$.fragment)},m(i,u){k(e,i,u),j(i,l,u),k(s,i,u),r=!0},p(i,u){const P=u&512?Y(c,[Z(i[9])]):{};e.$set(P);const m={};u&4&&(m.label=i[2]),u&8&&(m.info=i[3]),u&128&&(m.show_label=i[7]),u&1024&&(m.disabled=i[10]==="static"),!n&&u&1&&(n=!0,m.value=i[0],H(()=>n=!1)),!_&&u&2&&(_=!0,m.value_is_output=i[1],H(()=>_=!1)),s.$set(m)},i(i){r||(w(e.$$.fragment,i),w(s.$$.fragment,i),r=!0)},o(i){C(e.$$.fragment,i),C(s.$$.fragment,i),r=!1},d(i){B(e,i),i&&q(l),B(s,i)}}}function ie(t){let e,l;return e=new p({props:{visible:t[6],elem_id:t[4],elem_classes:t[5],disable:typeof t[8].container=="boolean"&&!t[8].container,$$slots:{default:[se]},$$scope:{ctx:t}}}),{c(){v(e.$$.fragment)},m(s,n){k(e,s,n),l=!0},p(s,[n]){const _={};n&64&&(_.visible=s[6]),n&16&&(_.elem_id=s[4]),n&32&&(_.elem_classes=s[5]),n&256&&(_.disable=typeof s[8].container=="boolean"&&!s[8].container),n&132751&&(_.$$scope={dirty:n,ctx:s}),e.$set(_)},i(s){l||(w(e.$$.fragment,s),l=!0)},o(s){C(e.$$.fragment,s),l=!1},d(s){B(e,s)}}}function ae(t,e,l){let{label:s="ColorPicker"}=e,{info:n=void 0}=e,{elem_id:_=""}=e,{elem_classes:r=[]}=e,{visible:c=!0}=e,{value:o}=e,{value_is_output:f=!1}=e,{show_label:b}=e,{style:d={}}=e,{loading_status:i}=e,{mode:u}=e;function P(a){o=a,l(0,o)}function m(a){f=a,l(1,f)}function U(a){g.call(this,t,a)}function A(a){g.call(this,t,a)}function G(a){g.call(this,t,a)}function M(a){g.call(this,t,a)}return t.$$set=a=>{"label"in a&&l(2,s=a.label),"info"in a&&l(3,n=a.info),"elem_id"in a&&l(4,_=a.elem_id),"elem_classes"in a&&l(5,r=a.elem_classes),"visible"in a&&l(6,c=a.visible),"value"in a&&l(0,o=a.value),"value_is_output"in a&&l(1,f=a.value_is_output),"show_label"in a&&l(7,b=a.show_label),"style"in a&&l(8,d=a.style),"loading_status"in a&&l(9,i=a.loading_status),"mode"in a&&l(10,u=a.mode)},[o,f,s,n,_,r,c,b,d,i,u,P,m,U,A,G,M]}class ne extends I{constructor(e){super(),J(this,e,ae,ie,K,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,show_label:7,style:8,loading_status:9,mode:10})}get label(){return this.$$.ctx[2]}set 
label(e){this.$$set({label:e}),h()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),h()}get elem_id(){return this.$$.ctx[4]}set elem_id(e){this.$$set({elem_id:e}),h()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),h()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),h()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),h()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),h()}get show_label(){return this.$$.ctx[7]}set show_label(e){this.$$set({show_label:e}),h()}get style(){return this.$$.ctx[8]}set style(e){this.$$set({style:e}),h()}get loading_status(){return this.$$.ctx[9]}set loading_status(e){this.$$set({loading_status:e}),h()}get mode(){return this.$$.ctx[10]}set mode(e){this.$$set({mode:e}),h()}}const be=ne,he=["static","dynamic"],me=t=>({type:{payload:"string"},description:{payload:"hex color code"},example_data:t.value??"#000000"});export{be as Component,me as document,he as modes}; -//# sourceMappingURL=index-d7bb690e.js.map diff --git a/spaces/williamberman/stable-diffusion-xl-inpainting/load_state_dict_patch.py b/spaces/williamberman/stable-diffusion-xl-inpainting/load_state_dict_patch.py deleted file mode 100644 index 623d6068dbd9388064d39fce9a74fe2816330053..0000000000000000000000000000000000000000 --- a/spaces/williamberman/stable-diffusion-xl-inpainting/load_state_dict_patch.py +++ /dev/null @@ -1,415 +0,0 @@ -import itertools -from collections import OrderedDict -from typing import Any, List, Mapping - -import torch -from torch.nn import Module -from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX, _IncompatibleKeys - -# fmt: off - -# this patch is for adding the `assign` key to load_state_dict. -# the code is in pytorch source for version 2.1 - -def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - r"""Copies parameters and buffers from :attr:`state_dict` into only - this module, but not its descendants. This is called on every submodule - in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this - module in input :attr:`state_dict` is provided as :attr:`local_metadata`. - For state dicts without metadata, :attr:`local_metadata` is empty. - Subclasses can achieve class-specific backward compatible loading using - the version number at `local_metadata.get("version", None)`. - Additionally, :attr:`local_metadata` can also contain the key - `assign_to_params_buffers` that indicates whether keys should be - assigned their corresponding tensor in the state_dict. - - .. note:: - :attr:`state_dict` is not the same object as the input - :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So - it can be modified. - - Args: - state_dict (dict): a dict containing parameters and - persistent buffers. - prefix (str): the prefix for parameters and buffers used in this - module - local_metadata (dict): a dict containing the metadata for this module. 
- See - strict (bool): whether to strictly enforce that the keys in - :attr:`state_dict` with :attr:`prefix` match the names of - parameters and buffers in this module - missing_keys (list of str): if ``strict=True``, add missing keys to - this list - unexpected_keys (list of str): if ``strict=True``, add unexpected - keys to this list - error_msgs (list of str): error messages should be added to this - list, and will be reported together in - :meth:`~torch.nn.Module.load_state_dict` - """ - for hook in self._load_state_dict_pre_hooks.values(): - hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) - - persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} - local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) - local_state = {k: v for k, v in local_name_params if v is not None} - assign_to_params_buffers = local_metadata.get("assign_to_params_buffers", False) - - for name, param in local_state.items(): - key = prefix + name - if key in state_dict: - input_param = state_dict[key] - if not torch.overrides.is_tensor_like(input_param): - error_msgs.append('While copying the parameter named "{}", ' - 'expected torch.Tensor or Tensor-like object from checkpoint but ' - 'received {}' - .format(key, type(input_param))) - continue - - # This is used to avoid copying uninitialized parameters into - # non-lazy modules, since they dont have the hook to do the checks - # in such case, it will error when accessing the .shape attribute. - is_param_lazy = torch.nn.parameter.is_lazy(param) - # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ - if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1: - input_param = input_param[0] - - if not is_param_lazy and input_param.shape != param.shape: - # local shape should match the one in checkpoint - error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, ' - 'the shape in current model is {}.' - .format(key, input_param.shape, param.shape)) - continue - try: - with torch.no_grad(): - if assign_to_params_buffers: - # Shape checks are already done above - if (isinstance(param, torch.nn.Parameter) and - not isinstance(input_param, torch.nn.Parameter)): - setattr(self, name, torch.nn.Parameter(input_param)) - else: - setattr(self, name, input_param) - else: - param.copy_(input_param) - except Exception as ex: - error_msgs.append('While copying the parameter named "{}", ' - 'whose dimensions in the model are {} and ' - 'whose dimensions in the checkpoint are {}, ' - 'an exception occurred : {}.' 
- .format(key, param.size(), input_param.size(), ex.args)) - elif strict: - missing_keys.append(key) - - extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX - if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state: - if extra_state_key in state_dict: - self.set_extra_state(state_dict[extra_state_key]) - elif strict: - missing_keys.append(extra_state_key) - elif strict and (extra_state_key in state_dict): - unexpected_keys.append(extra_state_key) - - if strict: - for key in state_dict.keys(): - if key.startswith(prefix) and key != extra_state_key: - input_name = key[len(prefix):] - input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child - if input_name not in self._modules and input_name not in local_state: - unexpected_keys.append(key) - -def load_state_dict(self, state_dict: Mapping[str, Any], - strict: bool = True, assign: bool = False): - r"""Copies parameters and buffers from :attr:`state_dict` into - this module and its descendants. If :attr:`strict` is ``True``, then - the keys of :attr:`state_dict` must exactly match the keys returned - by this module's :meth:`~torch.nn.Module.state_dict` function. - - .. warning:: - If :attr:`assign` is ``True`` the optimizer must be created after - the call to :attr:`load_state_dict`. - - Args: - state_dict (dict): a dict containing parameters and - persistent buffers. - strict (bool, optional): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` - assign (bool, optional): whether to assign items in the state - dictionary to their corresponding keys in the module instead - of copying them inplace into the module's current parameters and buffers. - When ``False``, the properties of the tensors in the current - module are preserved while when ``True``, the properties of the - Tensors in the state dict are preserved. - Default: ``False`` - - Returns: - ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: - * **missing_keys** is a list of str containing the missing keys - * **unexpected_keys** is a list of str containing the unexpected keys - - Note: - If a parameter or buffer is registered as ``None`` and its corresponding key - exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a - ``RuntimeError``. - """ - if not isinstance(state_dict, Mapping): - raise TypeError("Expected state_dict to be dict-like, got {}.".format(type(state_dict))) - - missing_keys: List[str] = [] - unexpected_keys: List[str] = [] - error_msgs: List[str] = [] - - # copy state_dict so _load_from_state_dict can modify it - metadata = getattr(state_dict, '_metadata', None) - state_dict = OrderedDict(state_dict) - if metadata is not None: - # mypy isn't aware that "_metadata" exists in state_dict - state_dict._metadata = metadata # type: ignore[attr-defined] - - def load(module, local_state_dict, prefix=''): - local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) - if assign: - local_metadata['assign_to_params_buffers'] = assign - module._load_from_state_dict( - local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) - for name, child in module._modules.items(): - if child is not None: - child_prefix = prefix + name + '.' 
- child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)} - load(child, child_state_dict, child_prefix) - - # Note that the hook can modify missing_keys and unexpected_keys. - incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys) - for hook in module._load_state_dict_post_hooks.values(): - out = hook(module, incompatible_keys) - assert out is None, ( - "Hooks registered with ``register_load_state_dict_post_hook`` are not" - "expected to return new values, if incompatible_keys need to be modified," - "it should be done inplace." - ) - - load(self, state_dict) - del load - - if strict: - if len(unexpected_keys) > 0: - error_msgs.insert( - 0, 'Unexpected key(s) in state_dict: {}. '.format( - ', '.join('"{}"'.format(k) for k in unexpected_keys))) - if len(missing_keys) > 0: - error_msgs.insert( - 0, 'Missing key(s) in state_dict: {}. '.format( - ', '.join('"{}"'.format(k) for k in missing_keys))) - - if len(error_msgs) > 0: - raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( - self.__class__.__name__, "\n\t".join(error_msgs))) - return _IncompatibleKeys(missing_keys, unexpected_keys) - -if [int(x) for x in torch.__version__.split('.')[0:2]] < [2, 1]: - Module._load_from_state_dict = _load_from_state_dict - Module.load_state_dict = load_state_dict - -# this patch is for adding the `assign` key to load_state_dict. -# the code is in pytorch source for version 2.1 - -def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - r"""Copies parameters and buffers from :attr:`state_dict` into only - this module, but not its descendants. This is called on every submodule - in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this - module in input :attr:`state_dict` is provided as :attr:`local_metadata`. - For state dicts without metadata, :attr:`local_metadata` is empty. - Subclasses can achieve class-specific backward compatible loading using - the version number at `local_metadata.get("version", None)`. - Additionally, :attr:`local_metadata` can also contain the key - `assign_to_params_buffers` that indicates whether keys should be - assigned their corresponding tensor in the state_dict. - - .. note:: - :attr:`state_dict` is not the same object as the input - :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So - it can be modified. - - Args: - state_dict (dict): a dict containing parameters and - persistent buffers. - prefix (str): the prefix for parameters and buffers used in this - module - local_metadata (dict): a dict containing the metadata for this module. 
- See - strict (bool): whether to strictly enforce that the keys in - :attr:`state_dict` with :attr:`prefix` match the names of - parameters and buffers in this module - missing_keys (list of str): if ``strict=True``, add missing keys to - this list - unexpected_keys (list of str): if ``strict=True``, add unexpected - keys to this list - error_msgs (list of str): error messages should be added to this - list, and will be reported together in - :meth:`~torch.nn.Module.load_state_dict` - """ - for hook in self._load_state_dict_pre_hooks.values(): - hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) - - persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} - local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) - local_state = {k: v for k, v in local_name_params if v is not None} - assign_to_params_buffers = local_metadata.get("assign_to_params_buffers", False) - - for name, param in local_state.items(): - key = prefix + name - if key in state_dict: - input_param = state_dict[key] - if not torch.overrides.is_tensor_like(input_param): - error_msgs.append('While copying the parameter named "{}", ' - 'expected torch.Tensor or Tensor-like object from checkpoint but ' - 'received {}' - .format(key, type(input_param))) - continue - - # This is used to avoid copying uninitialized parameters into - # non-lazy modules, since they dont have the hook to do the checks - # in such case, it will error when accessing the .shape attribute. - is_param_lazy = torch.nn.parameter.is_lazy(param) - # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ - if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1: - input_param = input_param[0] - - if not is_param_lazy and input_param.shape != param.shape: - # local shape should match the one in checkpoint - error_msgs.append('size mismatch for {}: copying a param with shape {} from checkpoint, ' - 'the shape in current model is {}.' - .format(key, input_param.shape, param.shape)) - continue - try: - with torch.no_grad(): - if assign_to_params_buffers: - # Shape checks are already done above - if (isinstance(param, torch.nn.Parameter) and - not isinstance(input_param, torch.nn.Parameter)): - setattr(self, name, torch.nn.Parameter(input_param)) - else: - setattr(self, name, input_param) - else: - param.copy_(input_param) - except Exception as ex: - error_msgs.append('While copying the parameter named "{}", ' - 'whose dimensions in the model are {} and ' - 'whose dimensions in the checkpoint are {}, ' - 'an exception occurred : {}.' 
- .format(key, param.size(), input_param.size(), ex.args)) - elif strict: - missing_keys.append(key) - - extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX - if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state: - if extra_state_key in state_dict: - self.set_extra_state(state_dict[extra_state_key]) - elif strict: - missing_keys.append(extra_state_key) - elif strict and (extra_state_key in state_dict): - unexpected_keys.append(extra_state_key) - - if strict: - for key in state_dict.keys(): - if key.startswith(prefix) and key != extra_state_key: - input_name = key[len(prefix):] - input_name = input_name.split('.', 1)[0] # get the name of param/buffer/child - if input_name not in self._modules and input_name not in local_state: - unexpected_keys.append(key) - -def load_state_dict(self, state_dict: Mapping[str, Any], - strict: bool = True, assign: bool = False): - r"""Copies parameters and buffers from :attr:`state_dict` into - this module and its descendants. If :attr:`strict` is ``True``, then - the keys of :attr:`state_dict` must exactly match the keys returned - by this module's :meth:`~torch.nn.Module.state_dict` function. - - .. warning:: - If :attr:`assign` is ``True`` the optimizer must be created after - the call to :attr:`load_state_dict`. - - Args: - state_dict (dict): a dict containing parameters and - persistent buffers. - strict (bool, optional): whether to strictly enforce that the keys - in :attr:`state_dict` match the keys returned by this module's - :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` - assign (bool, optional): whether to assign items in the state - dictionary to their corresponding keys in the module instead - of copying them inplace into the module's current parameters and buffers. - When ``False``, the properties of the tensors in the current - module are preserved while when ``True``, the properties of the - Tensors in the state dict are preserved. - Default: ``False`` - - Returns: - ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: - * **missing_keys** is a list of str containing the missing keys - * **unexpected_keys** is a list of str containing the unexpected keys - - Note: - If a parameter or buffer is registered as ``None`` and its corresponding key - exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a - ``RuntimeError``. - """ - if not isinstance(state_dict, Mapping): - raise TypeError("Expected state_dict to be dict-like, got {}.".format(type(state_dict))) - - missing_keys: List[str] = [] - unexpected_keys: List[str] = [] - error_msgs: List[str] = [] - - # copy state_dict so _load_from_state_dict can modify it - metadata = getattr(state_dict, '_metadata', None) - state_dict = OrderedDict(state_dict) - if metadata is not None: - # mypy isn't aware that "_metadata" exists in state_dict - state_dict._metadata = metadata # type: ignore[attr-defined] - - def load(module, local_state_dict, prefix=''): - local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) - if assign: - local_metadata['assign_to_params_buffers'] = assign - module._load_from_state_dict( - local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) - for name, child in module._modules.items(): - if child is not None: - child_prefix = prefix + name + '.' 
- child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)} - load(child, child_state_dict, child_prefix) - - # Note that the hook can modify missing_keys and unexpected_keys. - incompatible_keys = _IncompatibleKeys(missing_keys, unexpected_keys) - for hook in module._load_state_dict_post_hooks.values(): - out = hook(module, incompatible_keys) - assert out is None, ( - "Hooks registered with ``register_load_state_dict_post_hook`` are not" - "expected to return new values, if incompatible_keys need to be modified," - "it should be done inplace." - ) - - load(self, state_dict) - del load - - if strict: - if len(unexpected_keys) > 0: - error_msgs.insert( - 0, 'Unexpected key(s) in state_dict: {}. '.format( - ', '.join('"{}"'.format(k) for k in unexpected_keys))) - if len(missing_keys) > 0: - error_msgs.insert( - 0, 'Missing key(s) in state_dict: {}. '.format( - ', '.join('"{}"'.format(k) for k in missing_keys))) - - if len(error_msgs) > 0: - raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format( - self.__class__.__name__, "\n\t".join(error_msgs))) - return _IncompatibleKeys(missing_keys, unexpected_keys) - -if [int(x) for x in torch.__version__.split('.')[0:2]] < [2, 1]: - Module._load_from_state_dict = _load_from_state_dict - Module.load_state_dict = load_state_dict - -# fmt: on diff --git a/spaces/wilson1/bingo/src/lib/hooks/chat-history.ts b/spaces/wilson1/bingo/src/lib/hooks/chat-history.ts deleted file mode 100644 index c6fbf3fecfa86fe553f56acc8253236b8f22a775..0000000000000000000000000000000000000000 --- a/spaces/wilson1/bingo/src/lib/hooks/chat-history.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { zip } from 'lodash-es' -import { ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { Storage } from '../storage' - -/** - * conversations:$botId => Conversation[] - * conversation:$botId:$cid:messages => ChatMessageModel[] - */ - -interface Conversation { - id: string - createdAt: number -} - -type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] } - -async function loadHistoryConversations(botId: BotId): Promise { - const key = `conversations:${botId}` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -async function deleteHistoryConversation(botId: BotId, cid: string) { - const conversations = await loadHistoryConversations(botId) - const newConversations = conversations.filter((c) => c.id !== cid) - await Storage.set({ [`conversations:${botId}`]: newConversations }) -} - -async function loadConversationMessages(botId: BotId, cid: string): Promise { - const key = `conversation:${botId}:${cid}:messages` - const { [key]: value } = await Storage.get(key) - return value || [] -} - -export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) { - const conversations = await loadHistoryConversations(botId) - if (!conversations.some((c) => c.id === cid)) { - conversations.unshift({ id: cid, createdAt: Date.now() }) - await Storage.set({ [`conversations:${botId}`]: conversations }) - } - const key = `conversation:${botId}:${cid}:messages` - await Storage.set({ [key]: messages }) -} - -export async function loadHistoryMessages(botId: BotId): Promise { - const conversations = await loadHistoryConversations(botId) - const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id))) - return zip(conversations, messagesList).map(([c, messages]) => ({ - id: c!.id, - createdAt: c!.createdAt, - messages: messages!, 
- })) -} - -export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) { - const messages = await loadConversationMessages(botId, conversationId) - const newMessages = messages.filter((m) => m.id !== messageId) - await setConversationMessages(botId, conversationId, newMessages) - if (!newMessages.length) { - await deleteHistoryConversation(botId, conversationId) - } -} diff --git a/spaces/windmaple/stable-diffusion-2/README.md b/spaces/windmaple/stable-diffusion-2/README.md deleted file mode 100644 index 1f42390fdf8551c953a054198d3ce5c04a67fe7d..0000000000000000000000000000000000000000 --- a/spaces/windmaple/stable-diffusion-2/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stable Diffusion 2 -emoji: 🔮 -colorFrom: gray -colorTo: pink -sdk: static -pinned: true -property: test -license: mit -duplicated_from: stabilityai/stable-diffusion ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/wz758727829/ChuanhuChatGPT/overwrites.py b/spaces/wz758727829/ChuanhuChatGPT/overwrites.py deleted file mode 100644 index 436fcf46b5807ca045e77ac762039ba0ffc16f6d..0000000000000000000000000000000000000000 --- a/spaces/wz758727829/ChuanhuChatGPT/overwrites.py +++ /dev/null @@ -1,38 +0,0 @@ -from __future__ import annotations -import logging - -from llama_index import Prompt -from typing import List, Tuple -import mdtex2html - -from presets import * -from llama_func import * - - -def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]: - logging.debug("Compacting text chunks...🚀🚀🚀") - combined_str = [c.strip() for c in text_chunks if c.strip()] - combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)] - combined_str = "\n\n".join(combined_str) - # resplit based on self.max_chunk_overlap - text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1) - return text_splitter.split_text(combined_str) - - -def postprocess( - self, y: List[Tuple[str | None, str | None]] -) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. - """ - if y is None or y == []: - return [] - tag_regex = re.compile(r"^<\w+>[^<]+") - if tag_regex.search(y[-1][1]): - y[-1] = (y[-1][0].replace("\n", "
                "), y[-1][1]) - else: - y[-1] = (y[-1][0].replace("\n", "
                "), convert_mdtext(y[-1][1])) - return y diff --git a/spaces/xcchen/xcchenvits-uma-genshin-honkai/modules.py b/spaces/xcchen/xcchenvits-uma-genshin-honkai/modules.py deleted file mode 100644 index 56ea4145eddf19dd330a3a41ab0183efc1686d83..0000000000000000000000000000000000000000 --- a/spaces/xcchen/xcchenvits-uma-genshin-honkai/modules.py +++ /dev/null @@ -1,388 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y 
- return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, 
self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x 
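A minimal round-trip sketch for the coupling layer above (not part of the deleted file; it assumes this modules.py is importable as `modules`), showing that the reverse pass undoes the forward pass:

```python
import torch
from modules import ResidualCouplingLayer  # hypothetical import of the module defined above

# Small toy configuration; all sizes here are illustrative.
layer = ResidualCouplingLayer(channels=4, hidden_channels=8, kernel_size=5,
                              dilation_rate=1, n_layers=2)
x = torch.randn(1, 4, 10)        # [batch, channels, frames]
x_mask = torch.ones(1, 1, 10)    # no padding in this toy example

y, logdet = layer(x, x_mask)              # forward: affine-transform half the channels, return log-determinant
x_rec = layer(y, x_mask, reverse=True)    # reverse: invert the same transform
print(torch.allclose(x, x_rec, atol=1e-5))  # expected: True
```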
- - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/xfh/min-stable-diffusion-web/README.md b/spaces/xfh/min-stable-diffusion-web/README.md deleted file mode 100644 index 50efeeca2301440788d0a62d352d18c3ec03c4af..0000000000000000000000000000000000000000 --- a/spaces/xfh/min-stable-diffusion-web/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Min Stable Diffusion Web -emoji: 🏢 -colorFrom: blue -colorTo: purple -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -python_version: 3.10.6 -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/resnet_ibn_a.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/resnet_ibn_a.py deleted file mode 100644 index d198e7c9e361c40d25bc7eb1f352b971596ee124..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/models/resnet_ibn_a.py +++ /dev/null @@ -1,289 +0,0 @@ -""" -Credit to https://github.com/XingangPan/IBN-Net. 
-""" -from __future__ import division, absolute_import -import math -import torch -import torch.nn as nn -import torch.utils.model_zoo as model_zoo - -__all__ = ['resnet50_ibn_a'] - -model_urls = { - 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', - 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', - 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', -} - - -def conv3x3(in_planes, out_planes, stride=1): - "3x3 convolution with padding" - return nn.Conv2d( - in_planes, - out_planes, - kernel_size=3, - stride=stride, - padding=1, - bias=False - ) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class IBN(nn.Module): - - def __init__(self, planes): - super(IBN, self).__init__() - half1 = int(planes / 2) - self.half = half1 - half2 = planes - half1 - self.IN = nn.InstanceNorm2d(half1, affine=True) - self.BN = nn.BatchNorm2d(half2) - - def forward(self, x): - split = torch.split(x, self.half, 1) - out1 = self.IN(split[0].contiguous()) - out2 = self.BN(split[1].contiguous()) - out = torch.cat((out1, out2), 1) - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, ibn=False, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - if ibn: - self.bn1 = IBN(planes) - else: - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, - planes, - kernel_size=3, - stride=stride, - padding=1, - bias=False - ) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d( - planes, planes * self.expansion, kernel_size=1, bias=False - ) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - """Residual network + IBN layer. - - Reference: - - He et al. Deep Residual Learning for Image Recognition. CVPR 2016. - - Pan et al. Two at Once: Enhancing Learning and Generalization - Capacities via IBN-Net. ECCV 2018. 
- """ - - def __init__( - self, - block, - layers, - num_classes=1000, - loss='softmax', - fc_dims=None, - dropout_p=None, - **kwargs - ): - scale = 64 - self.inplanes = scale - super(ResNet, self).__init__() - self.loss = loss - self.feature_dim = scale * 8 * block.expansion - - self.conv1 = nn.Conv2d( - 3, scale, kernel_size=7, stride=2, padding=3, bias=False - ) - self.bn1 = nn.BatchNorm2d(scale) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, scale, layers[0]) - self.layer2 = self._make_layer(block, scale * 2, layers[1], stride=2) - self.layer3 = self._make_layer(block, scale * 4, layers[2], stride=2) - self.layer4 = self._make_layer(block, scale * 8, layers[3], stride=2) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.fc = self._construct_fc_layer( - fc_dims, scale * 8 * block.expansion, dropout_p - ) - self.classifier = nn.Linear(self.feature_dim, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels - m.weight.data.normal_(0, math.sqrt(2. / n)) - elif isinstance(m, nn.BatchNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - elif isinstance(m, nn.InstanceNorm2d): - m.weight.data.fill_(1) - m.bias.data.zero_() - - def _make_layer(self, block, planes, blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, - planes * block.expansion, - kernel_size=1, - stride=stride, - bias=False - ), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - ibn = True - if planes == 512: - ibn = False - layers.append(block(self.inplanes, planes, ibn, stride, downsample)) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, ibn)) - - return nn.Sequential(*layers) - - def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): - """Constructs fully connected layer - - Args: - fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed - input_dim (int): input dimension - dropout_p (float): dropout probability, if None, dropout is unused - """ - if fc_dims is None: - self.feature_dim = input_dim - return None - - assert isinstance( - fc_dims, (list, tuple) - ), 'fc_dims must be either list or tuple, but got {}'.format( - type(fc_dims) - ) - - layers = [] - for dim in fc_dims: - layers.append(nn.Linear(input_dim, dim)) - layers.append(nn.BatchNorm1d(dim)) - layers.append(nn.ReLU(inplace=True)) - if dropout_p is not None: - layers.append(nn.Dropout(p=dropout_p)) - input_dim = dim - - self.feature_dim = fc_dims[-1] - - return nn.Sequential(*layers) - - def featuremaps(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - return x - - def forward(self, x): - f = self.featuremaps(x) - v = self.avgpool(f) - v = v.view(v.size(0), -1) - if self.fc is not None: - v = self.fc(v) - if not self.training: - return v - y = self.classifier(v) - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError("Unsupported loss: {}".format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def resnet50_ibn_a(num_classes, loss='softmax', pretrained=False, **kwargs): - model = ResNet( - Bottleneck, [3, 4, 6, 3], num_classes=num_classes, loss=loss, **kwargs - ) - if pretrained: - init_pretrained_weights(model, model_urls['resnet50']) - return model diff --git a/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/tests/test_numeric_batchnorm.py b/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/tests/test_numeric_batchnorm.py deleted file mode 100644 index 63661389782806ea2182c049448df5d05fc6d2f1..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/tests/test_numeric_batchnorm.py +++ /dev/null @@ -1,56 +0,0 @@ -# -*- coding: utf-8 -*- -# File : test_numeric_batchnorm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. - -import unittest - -import torch -import torch.nn as nn -from torch.autograd import Variable - -from sync_batchnorm.unittest import TorchTestCase - - -def handy_var(a, unbias=True): - n = a.size(0) - asum = a.sum(dim=0) - as_sum = (a ** 2).sum(dim=0) # a square sum - sumvar = as_sum - asum * asum / n - if unbias: - return sumvar / (n - 1) - else: - return sumvar / n - - -class NumericTestCase(TorchTestCase): - def testNumericBatchNorm(self): - a = torch.rand(16, 10) - bn = nn.BatchNorm1d(10, momentum=1, eps=1e-5, affine=False) - bn.train() - - a_var1 = Variable(a, requires_grad=True) - b_var1 = bn(a_var1) - loss1 = b_var1.sum() - loss1.backward() - - a_var2 = Variable(a, requires_grad=True) - a_mean2 = a_var2.mean(dim=0, keepdim=True) - a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-5)) - # a_std2 = torch.sqrt(a_var2.var(dim=0, keepdim=True, unbiased=False) + 1e-5) - b_var2 = (a_var2 - a_mean2) / a_std2 - loss2 = b_var2.sum() - loss2.backward() - - self.assertTensorClose(bn.running_mean, a.mean(dim=0)) - self.assertTensorClose(bn.running_var, handy_var(a)) - self.assertTensorClose(a_var1.data, a_var2.data) - self.assertTensorClose(b_var1.data, b_var2.data) - self.assertTensorClose(a_var1.grad, a_var2.grad) - - -if __name__ == '__main__': - unittest.main() diff --git a/spaces/xswu/HPSv2/src/open_clip/hf_model.py b/spaces/xswu/HPSv2/src/open_clip/hf_model.py deleted file mode 100644 index fbccc812757bf10b122ff14096980e0e38d1d221..0000000000000000000000000000000000000000 --- a/spaces/xswu/HPSv2/src/open_clip/hf_model.py +++ /dev/null @@ -1,176 +0,0 @@ -""" huggingface model adapter - -Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as a text tower in CLIP model. 
-""" - -import re - -import torch -import torch.nn as nn -from torch import TensorType - -try: - import transformers - from transformers import AutoModel, AutoTokenizer, AutoConfig, PretrainedConfig - from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \ - BaseModelOutputWithPoolingAndCrossAttentions -except ImportError as e: - transformers = None - - - class BaseModelOutput: - pass - - - class PretrainedConfig: - pass - -from .hf_configs import arch_dict - - -# utils -def _camel2snake(s): - return re.sub(r'(? ( - -
                - {children} -
                -
                -) -DialogPortal.displayName = DialogPrimitive.Portal.displayName - -const DialogOverlay = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogOverlay.displayName = DialogPrimitive.Overlay.displayName - -const DialogContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, children, ...props }, ref) => ( - - - - {children} - - - Close - - - -)) -DialogContent.displayName = DialogPrimitive.Content.displayName - -const DialogHeader = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
                -) -DialogHeader.displayName = 'DialogHeader' - -const DialogFooter = ({ - className, - ...props -}: React.HTMLAttributes) => ( -
                -) -DialogFooter.displayName = 'DialogFooter' - -const DialogTitle = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogTitle.displayName = DialogPrimitive.Title.displayName - -const DialogDescription = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, ...props }, ref) => ( - -)) -DialogDescription.displayName = DialogPrimitive.Description.displayName - -export { - Dialog, - DialogTrigger, - DialogContent, - DialogHeader, - DialogFooter, - DialogTitle, - DialogDescription -} diff --git a/spaces/yaoshining/text-generation-webui/docs/ExLlama.md b/spaces/yaoshining/text-generation-webui/docs/ExLlama.md deleted file mode 100644 index db0ebe63c90cf155e8b550e73a542d560ccb0b54..0000000000000000000000000000000000000000 --- a/spaces/yaoshining/text-generation-webui/docs/ExLlama.md +++ /dev/null @@ -1,22 +0,0 @@ -# ExLlama - -### About - -ExLlama is an extremely optimized GPTQ backend for LLaMA models. It features much lower VRAM usage and much higher speeds due to not relying on unoptimized transformers code. - -### Usage - -Configure text-generation-webui to use exllama via the UI or command line: - - In the "Model" tab, set "Loader" to "exllama" - - Specify `--loader exllama` on the command line - -### Manual setup - -No additional installation steps are necessary since an exllama package is already included in the requirements.txt. If this package fails to install for some reason, you can install it manually by cloning the original repository into your `repositories/` folder: - -``` -mkdir repositories -cd repositories -git clone https://github.com/turboderp/exllama -``` - diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/TempoGraph/TempoGraphCanvas/Lines.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/TempoGraph/TempoGraphCanvas/Lines.tsx deleted file mode 100644 index 4ea4fe883446cdc663d46f18ba24b51b5294e2c9..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/TempoGraph/TempoGraphCanvas/Lines.tsx +++ /dev/null @@ -1,33 +0,0 @@ -import { Rectangles } from "@ryohey/webgl-react" -import Color from "color" -import { range } from "lodash" -import { observer } from "mobx-react-lite" -import { FC } from "react" -import { IRect } from "../../../../common/geometry" -import { colorToVec4 } from "../../../gl/color" -import { useStores } from "../../../hooks/useStores" -import { useTheme } from "../../../hooks/useTheme" - -export const Lines: FC<{ width: number; zIndex: number }> = observer( - ({ width, zIndex }) => { - const { - tempoEditorStore: { transform }, - } = useStores() - const theme = useTheme() - - const hline = (y: number): IRect => ({ - x: 0, - y, - width, - height: 1, - }) - - // 30 -> 510 = 17 Divided line - const rects = range(30, transform.maxBPM, 30) - .map((i) => transform.getY(i)) - .map(hline) - const color = colorToVec4(Color(theme.dividerColor)) - - return - }, -) diff --git a/spaces/yerfor/SyntaSpeech/README.md b/spaces/yerfor/SyntaSpeech/README.md deleted file mode 100644 index 7328b6ff2d9da31cd02262c248df2b54b9d18fee..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: SyntaSpeech -emoji: 🤗 -colorFrom: yellow -colorTo: orange -sdk: gradio -app_file: "inference/tts/gradio/infer.py" -pinned: false ---- - diff --git 
a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py b/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py deleted file mode 100644 index c8340c723fad8e07e2fc62daaa3912487498814b..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py +++ /dev/null @@ -1,221 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Conditional DETR -# Copyright (c) 2021 Microsoft. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Copied from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. -# ------------------------------------------------------------------------ - -""" -Backbone modules. -""" - -from typing import Dict, List - -import torch -import torch.nn.functional as F -import torchvision -from torch import nn -from torchvision.models._utils import IntermediateLayerGetter - -from groundingdino.util.misc import NestedTensor, clean_state_dict, is_main_process - -from .position_encoding import build_position_encoding -from .swin_transformer import build_swin_transformer - - -class FrozenBatchNorm2d(torch.nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - Copy-paste from torchvision.misc.ops with added eps before rqsrt, - without which any other models than torchvision.models.resnet[18,34,50,101] - produce nans. 
- """ - - def __init__(self, n): - super(FrozenBatchNorm2d, self).__init__() - self.register_buffer("weight", torch.ones(n)) - self.register_buffer("bias", torch.zeros(n)) - self.register_buffer("running_mean", torch.zeros(n)) - self.register_buffer("running_var", torch.ones(n)) - - def _load_from_state_dict( - self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ): - num_batches_tracked_key = prefix + "num_batches_tracked" - if num_batches_tracked_key in state_dict: - del state_dict[num_batches_tracked_key] - - super(FrozenBatchNorm2d, self)._load_from_state_dict( - state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs - ) - - def forward(self, x): - # move reshapes to the beginning - # to make it fuser-friendly - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - rv = self.running_var.reshape(1, -1, 1, 1) - rm = self.running_mean.reshape(1, -1, 1, 1) - eps = 1e-5 - scale = w * (rv + eps).rsqrt() - bias = b - rm * scale - return x * scale + bias - - -class BackboneBase(nn.Module): - def __init__( - self, - backbone: nn.Module, - train_backbone: bool, - num_channels: int, - return_interm_indices: list, - ): - super().__init__() - for name, parameter in backbone.named_parameters(): - if ( - not train_backbone - or "layer2" not in name - and "layer3" not in name - and "layer4" not in name - ): - parameter.requires_grad_(False) - - return_layers = {} - for idx, layer_index in enumerate(return_interm_indices): - return_layers.update( - {"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)} - ) - - # if len: - # if use_stage1_feature: - # return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} - # else: - # return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"} - # else: - # return_layers = {'layer4': "0"} - self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) - self.num_channels = num_channels - - def forward(self, tensor_list: NestedTensor): - xs = self.body(tensor_list.tensors) - out: Dict[str, NestedTensor] = {} - for name, x in xs.items(): - m = tensor_list.mask - assert m is not None - mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] - out[name] = NestedTensor(x, mask) - # import ipdb; ipdb.set_trace() - return out - - -class Backbone(BackboneBase): - """ResNet backbone with frozen BatchNorm.""" - - def __init__( - self, - name: str, - train_backbone: bool, - dilation: bool, - return_interm_indices: list, - batch_norm=FrozenBatchNorm2d, - ): - if name in ["resnet18", "resnet34", "resnet50", "resnet101"]: - backbone = getattr(torchvision.models, name)( - replace_stride_with_dilation=[False, False, dilation], - pretrained=is_main_process(), - norm_layer=batch_norm, - ) - else: - raise NotImplementedError("Why you can get here with name {}".format(name)) - # num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 - assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available." 
- assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]] - num_channels_all = [256, 512, 1024, 2048] - num_channels = num_channels_all[4 - len(return_interm_indices) :] - super().__init__(backbone, train_backbone, num_channels, return_interm_indices) - - -class Joiner(nn.Sequential): - def __init__(self, backbone, position_embedding): - super().__init__(backbone, position_embedding) - - def forward(self, tensor_list: NestedTensor): - xs = self[0](tensor_list) - out: List[NestedTensor] = [] - pos = [] - for name, x in xs.items(): - out.append(x) - # position encoding - pos.append(self[1](x).to(x.tensors.dtype)) - - return out, pos - - -def build_backbone(args): - """ - Useful args: - - backbone: backbone name - - lr_backbone: - - dilation - - return_interm_indices: available: [0,1,2,3], [1,2,3], [3] - - backbone_freeze_keywords: - - use_checkpoint: for swin only for now - - """ - position_embedding = build_position_encoding(args) - train_backbone = True - if not train_backbone: - raise ValueError("Please set lr_backbone > 0") - return_interm_indices = args.return_interm_indices - assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]] - args.backbone_freeze_keywords - use_checkpoint = getattr(args, "use_checkpoint", False) - - if args.backbone in ["resnet50", "resnet101"]: - backbone = Backbone( - args.backbone, - train_backbone, - args.dilation, - return_interm_indices, - batch_norm=FrozenBatchNorm2d, - ) - bb_num_channels = backbone.num_channels - elif args.backbone in [ - "swin_T_224_1k", - "swin_B_224_22k", - "swin_B_384_22k", - "swin_L_224_22k", - "swin_L_384_22k", - ]: - pretrain_img_size = int(args.backbone.split("_")[-2]) - backbone = build_swin_transformer( - args.backbone, - pretrain_img_size=pretrain_img_size, - out_indices=tuple(return_interm_indices), - dilation=False, - use_checkpoint=use_checkpoint, - ) - - bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :] - else: - raise NotImplementedError("Unknown backbone {}".format(args.backbone)) - - assert len(bb_num_channels) == len( - return_interm_indices - ), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}" - - model = Joiner(backbone, position_embedding) - model.num_channels = bb_num_channels - assert isinstance( - bb_num_channels, List - ), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels)) - # import ipdb; ipdb.set_trace() - return model diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/realm/__init__.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/realm/__init__.py deleted file mode 100644 index 594ce0c35e382f82b0ba3222644cf37ef01880e1..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/realm/__init__.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import TYPE_CHECKING - -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available - - -_import_structure = { - "configuration_realm": ["REALM_PRETRAINED_CONFIG_ARCHIVE_MAP", "RealmConfig"], - "tokenization_realm": ["RealmTokenizer"], -} - -try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["tokenization_realm_fast"] = ["RealmTokenizerFast"] - -try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() -except OptionalDependencyNotAvailable: - pass -else: - _import_structure["modeling_realm"] = [ - "REALM_PRETRAINED_MODEL_ARCHIVE_LIST", - "RealmEmbedder", - "RealmForOpenQA", - "RealmKnowledgeAugEncoder", - "RealmPreTrainedModel", - "RealmReader", - "RealmScorer", - "load_tf_weights_in_realm", - ] - _import_structure["retrieval_realm"] = ["RealmRetriever"] - - -if TYPE_CHECKING: - from .configuration_realm import REALM_PRETRAINED_CONFIG_ARCHIVE_MAP, RealmConfig - from .tokenization_realm import RealmTokenizer - - try: - if not is_tokenizers_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .tokenization_realm import RealmTokenizerFast - - try: - if not is_torch_available(): - raise OptionalDependencyNotAvailable() - except OptionalDependencyNotAvailable: - pass - else: - from .modeling_realm import ( - REALM_PRETRAINED_MODEL_ARCHIVE_LIST, - RealmEmbedder, - RealmForOpenQA, - RealmKnowledgeAugEncoder, - RealmPreTrainedModel, - RealmReader, - RealmScorer, - load_tf_weights_in_realm, - ) - from .retrieval_realm import RealmRetriever - - -else: - import sys - - sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-basis.js b/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-basis.js deleted file mode 100644 index 959cf4cb6584e056db9cc7ac7d1e5168b1abea98..0000000000000000000000000000000000000000 --- a/spaces/younker/chatgpt-turbo/client/node_modules/autoprefixer/lib/hacks/flex-basis.js +++ /dev/null @@ -1,39 +0,0 @@ -let flexSpec = require('./flex-spec') -let Declaration = require('../declaration') - -class FlexBasis extends Declaration { - /** - * Return property name by final spec - */ - normalize() { - return 'flex-basis' - } - - /** - * Return flex property for 2012 spec - */ - prefixed(prop, prefix) { - let spec - ;[spec, prefix] = flexSpec(prefix) - if (spec === 2012) { - return prefix + 'flex-preferred-size' - } - return super.prefixed(prop, prefix) - } - - /** - * Ignore 2009 spec and use flex property for 2012 - */ - set(decl, prefix) { - let spec - ;[spec, prefix] = flexSpec(prefix) - if (spec === 2012 || spec === 'final') { - return super.set(decl, prefix) - } - return undefined - } -} - -FlexBasis.names = ['flex-basis', 'flex-preferred-size'] - -module.exports = FlexBasis diff --git a/spaces/yuan1615/EmpathyVC/mel_processing.py b/spaces/yuan1615/EmpathyVC/mel_processing.py deleted file mode 100644 index 4dd86ae6126101d6f3f1e4b0c5488d2f809b190b..0000000000000000000000000000000000000000 --- a/spaces/yuan1615/EmpathyVC/mel_processing.py +++ /dev/null @@ -1,119 +0,0 @@ -import torch -import torch.nn.functional as F -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, 
clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec - -import struct -def create_wav_header(audio_size: int, sampleRate:int, bits:int, channel:int): - header = b'' - header += b"RIFF" - header += struct.pack('i', int(audio_size + 44 - 8)) - header += b"WAVEfmt " - header += b'\x10\x00\x00\x00' - header += b'\x01\x00' - header += struct.pack('H', channel) - header += struct.pack('i', 
sampleRate) - header += struct.pack('i', int(sampleRate * bits / 8)) - header += struct.pack('H', int(channel * bits / 8)) - header += struct.pack('H', bits) - header += b'data' - header += struct.pack('i', audio_size) - return header diff --git a/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/__init__.py b/spaces/yuan2023/Stable-Diffusion-ControlNet-WebUI/diffusion_webui/controlnet/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/lib/isomorphic/browser.ts b/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/lib/isomorphic/browser.ts deleted file mode 100644 index de125b1f1786d1618cb1ff47f403d76c6784f4ce..0000000000000000000000000000000000000000 --- a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/lib/isomorphic/browser.ts +++ /dev/null @@ -1,11 +0,0 @@ -'use client' - -const debug = console.info.bind(console) - -class WebSocketAlias extends WebSocket { - constructor(address: string | URL, ...args: any) { - super(address) - } -} - -export default { fetch, WebSocket: WebSocketAlias, debug } diff --git a/spaces/zlc99/M4Singer/modules/parallel_wavegan/losses/__init__.py b/spaces/zlc99/M4Singer/modules/parallel_wavegan/losses/__init__.py deleted file mode 100644 index b03080a907cb5cb4b316ceb74866ddbc406b33bf..0000000000000000000000000000000000000000 --- a/spaces/zlc99/M4Singer/modules/parallel_wavegan/losses/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .stft_loss import * # NOQA diff --git a/spaces/ztudy/chatbot/README.md b/spaces/ztudy/chatbot/README.md deleted file mode 100644 index 63adcc7c82c32d880dc74237e99493185719a016..0000000000000000000000000000000000000000 --- a/spaces/ztudy/chatbot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Chatbot -emoji: 🐢 -colorFrom: red -colorTo: pink -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference