diff --git a/spaces/0x876/Yotta_Mix/app.py b/spaces/0x876/Yotta_Mix/app.py
deleted file mode 100644
index b60a087620a806fea130bedcd6940bef75fa3337..0000000000000000000000000000000000000000
--- a/spaces/0x876/Yotta_Mix/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/CompVis/stable-diffusion-v1-4").launch()
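For reference, the one-liner above uses the older `gr.Interface.load` helper from Gradio 3.x. On a more recent Gradio release the equivalent entry point is the top-level `gr.load`; the sketch below is only an illustration of that newer pattern (it assumes a recent Gradio version and network access to the Hugging Face Hub), not a replacement that was ever part of this Space.

```python
# Minimal sketch, assuming a recent Gradio release where gr.load() replaces
# gr.Interface.load(); not the Space's actual code.
import gradio as gr

# Wrap the hosted inference API for the model in a ready-made web UI.
demo = gr.load("models/CompVis/stable-diffusion-v1-4")

if __name__ == "__main__":
    demo.launch()
```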
diff --git a/spaces/0x90e/ESRGAN-MANGA/process_image.py b/spaces/0x90e/ESRGAN-MANGA/process_image.py
deleted file mode 100644
index ca687c0329f171f83db2bf9c4a3bb2d6ffadfda9..0000000000000000000000000000000000000000
--- a/spaces/0x90e/ESRGAN-MANGA/process_image.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-import gradio as gr
-from run_cmd import run_cmd
-from PIL import Image
-import tempfile
-import uuid
-import numpy as np
-
-temp_path = tempfile.gettempdir()
-
-def inference(img, size, type):
- if not img:
- raise Exception("No image!")
-
- OUTPUT_PATH = os.path.join(temp_path, f"{str(uuid.uuid4())[0:12]}_{size}.png")
-
- img.save(OUTPUT_PATH)
-
- if type == "Manga":
- run_cmd(f"python inference_manga_v2.py {OUTPUT_PATH}")
- else:
- run_cmd(f"python inference.py {OUTPUT_PATH} {type}")
-
- img_out = Image.open(OUTPUT_PATH)
-
- if size == "x2":
- img_out = img_out.resize((img_out.width // 2, img_out.height // 2), resample=Image.BICUBIC)
-
- img_out = np.array(img_out)
-
- return img_out, gr.File.update(value=OUTPUT_PATH)
\ No newline at end of file
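The Space's own app.py is not included in this diff, so there is no canonical example of how `inference` was exposed to users. As a rough sketch only, the helper could be wired into a Gradio interface along the lines below; the option values ("x2"/"x4", "Manga"), labels and component choices are assumptions inferred from the function body, not the original UI, and the sketch targets the same Gradio 3.x API the helper itself uses (it returns `gr.File.update(...)`).

```python
# Illustrative wiring of the deleted inference() helper into a Gradio 3.x UI.
# Component choices and option values are assumptions, not the Space's real app.py.
import gradio as gr
from process_image import inference  # helper defined in the deleted module

demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(type="pil", label="Input image"),                   # inference() calls img.save(), so it expects a PIL image
        gr.Radio(["x2", "x4"], value="x4", label="Scale"),           # "x2" triggers the downscale branch
        gr.Radio(["Manga", "Photo"], value="Manga", label="Model"),  # "Manga" selects inference_manga_v2.py
    ],
    outputs=[gr.Image(label="Upscaled image"), gr.File(label="Result file")],
)

if __name__ == "__main__":
    demo.launch()
```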
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/Skyrim-Simpackdll.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/Skyrim-Simpackdll.md
deleted file mode 100644
index e03bcb747aeca5fb2139d3b4c835094772e29991..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/Skyrim-Simpackdll.md
+++ /dev/null
@@ -1,88 +0,0 @@
-## Skyrim Simpackdll
-
-
-
-
-
-
-
-
-
-
-**LINK ===> [https://jinyurl.com/2tA0aO](https://jinyurl.com/2tA0aO)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# How to Fix Skyrim Simpackdll Error
-
-
-
-If you are trying to play Skyrim on your PC, you may encounter an error message that says "simpack.dll is missing" or "simpack.dll not found". This error means that your system does not have the simpack.dll file, which is a dynamic link library (DLL) file that is required by the Skyrim game. The simpack.dll file contains various functions and routines that are used by the game to perform simulations of mechanical systems, such as vehicle dynamics, suspension systems, and powertrain systems.
-
-
-
-The simpack.dll error can be caused by various reasons, such as a corrupt or missing DLL file, conflicts with other software, or malware infections. In this article, we will show you how to fix the Skyrim simpackdll error by following these steps:
-
-
-
-1. Reinstall the Skyrim game. The easiest way to fix the simpack.dll error is to reinstall the Skyrim game on your PC. This will ensure that you have all the necessary files and components for the game to run properly. To reinstall the game, you need to uninstall it first from your Control Panel or Settings app, and then install it again from your original source, such as a CD/DVD or a digital download.
-
-2. Download and restore the simpack.dll file. If reinstalling the game does not work, you can try to download and restore the simpack.dll file manually. You can get the file from a reliable source, such as [DLLme.com](https://www.dllme.com/dll/files/simpack), which offers free DLL downloads for various software applications. To download and restore the simpack.dll file, follow these steps:
-
- - Go to [DLLme.com](https://www.dllme.com/dll/files/simpack) and search for "simpack.dll".
-
- - Click on the "Download" button and save the file to your computer.
-
- - Copy and paste the file to the folder where Skyrim is installed. The default location is C:\Program Files (x86)\Steam\steamapps\common\Skyrim.
-
- - Restart your computer and launch Skyrim.
-
-3. Scan your PC for malware. Sometimes, the simpack.dll error can be caused by malware infections that may damage or delete the DLL file. To scan your PC for malware, you need to use a reputable antivirus or anti-malware software, such as [Malwarebytes](https://www.malwarebytes.com/), which can detect and remove various types of malware threats from your system. To scan your PC for malware, follow these steps:
-
- - Download and install Malwarebytes from [Malwarebytes.com](https://www.malwarebytes.com/).
-
- - Launch Malwarebytes and click on the "Scan" button.
-
- - Wait for the scan to complete and review the results.
-
- - If any malware is detected, click on the "Quarantine" button to remove them.
-
- - Restart your computer and launch Skyrim.
-
-4. Update your drivers and Windows. Another possible cause of the simpack.dll error is outdated or incompatible drivers or Windows updates. Drivers are software components that allow your hardware devices to communicate with your operating system. Windows updates are software patches that fix bugs and improve security and performance of your system. To update your drivers and Windows, follow these steps:
-
- - Go to Device Manager by pressing Windows + X keys and selecting Device Manager from the menu.
-
- - Expand each category of devices and look for any yellow exclamation marks or red crosses.
-
- - If you find any, right-click on them and select Update driver.
-
- - Follow the on-screen instructions to install the latest drivers for your devices.
-
- - Go to Settings by pressing Windows + I keys and selecting Settings from the menu.
-
- - Click on Update & Security and then on Windows Update.
-
- - Click on Check for updates and install any available updates for your system.
-
-
-
-
-
-
-
-
-
-
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cle De Licence Malwarebytes Anti Malware Gratuit Tlchargez Et Installez Le Logiciel En Quelques Minutes.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cle De Licence Malwarebytes Anti Malware Gratuit Tlchargez Et Installez Le Logiciel En Quelques Minutes.md
deleted file mode 100644
index a5cf50a7d374403dad94e4e51f1d92dce47bcae3..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Cle De Licence Malwarebytes Anti Malware Gratuit Tlchargez Et Installez Le Logiciel En Quelques Minutes.md
+++ /dev/null
@@ -1,113 +0,0 @@
-
-
Cle De Licence Malwarebytes Anti Malware Gratuit
-
Are you looking for a way to protect your computer from viruses, malware, ransomware, and other online threats? Do you want to enjoy the full features of one of the best anti-malware programs on the market without paying a dime? If yes, then you are in the right place. In this article, we will show you how to get a free license key for Malwarebytes Anti Malware, a powerful and reliable cyber security software that can scan and remove malware from your device in seconds. We will also explain what Malwarebytes Anti Malware is, why you need a license key for it, and what features it offers. So, let's get started!
-
Introduction
-
What is Malwarebytes Anti Malware?
-
Malwarebytes Anti Malware is a cyber security software that protects your device from malware, viruses, ransomware, spyware, adware, trojans, and other online threats. It uses multiple layers of malware-crushing technology, including real-time protection, artificial intelligence, behavior analysis, and web filtering. It can detect and remove threats that other antivirus programs may miss or overlook. It can also work alongside your existing antivirus software to provide an extra layer of security.
Why do you need a license key for Malwarebytes Anti Malware?
-
Malwarebytes Anti Malware offers two versions: a free version and a premium version. The free version allows you to scan and remove malware from your device manually. However, it does not offer real-time protection, ransomware protection, web protection, or privacy protection. These features are only available in the premium version, which requires a license key to activate. A license key is a unique code that verifies your purchase and unlocks the full features of Malwarebytes Anti Malware. The premium version costs $39.99 per year for one device or $59.99 per year for three devices.
-
How to get a free license key for Malwarebytes Anti Malware?
-
If you don't want to pay for the premium version of Malwarebytes Anti Malware but still want to enjoy its full features, there is a way to get a free license key for it. You can use one of the following methods:
-
-
Use a trial version: You can download and install a 14-day trial version of Malwarebytes Anti Malware from its official website here. This will give you access to all the premium features for free for two weeks. After that, you can either buy a license key or uninstall the software.
-
Use a giveaway: You can look for online giveaways that offer free license keys for Malwarebytes Anti Malware. These are usually sponsored by blogs, websites, or social media pages that promote the software. You may have to follow some instructions or enter some contests to participate in these giveaways. However, be careful not to fall for scams or fake giveaways that may infect your device with malware or steal your personal information.
-
Use a crack: You can search for cracked versions of Malwarebytes Anti Malware that come with pre-activated license keys. These are usually uploaded by hackers or pirates who bypass the security measures of the software. You can download these cracked versions from torrent sites or file-sharing platforms. However, this method is illegal and risky. You may violate the terms and conditions of the software and face legal consequences. You may also expose your device to malware or viruses that may be hidden in these cracked versions.
-
-
The best and safest way to get a free license key for Malwarebytes Anti Malware is to use the trial version. This will allow you to test the software and see if it suits your needs before buying it.
-
Features of Malwarebytes Anti Malware
-
Virus and malware protection
-
Malwarebytes Anti Malware can scan your device for viruses and malware in seconds and remove them with ease. It uses advanced heuristics and artificial intelligence to detect and block threats that other antivirus programs may miss or ignore. It can also perform custom scans, scheduled scans, and hyper scans to suit your preferences and needs.
-
Ransomware protection
-
Malwarebytes Anti Malware can protect your device from ransomware attacks that may encrypt your files and demand money to restore them. It uses behavior-based technology to monitor your system for suspicious activity and stop ransomware before it can cause any damage. It can also recover your files from backup if they are affected by ransomware.
-
Comment obtenir une cle de licence gratuite pour Malwarebytes Anti Malware
-Malwarebytes Anti Malware Premium gratuit avec cle d'activation
-Telecharger Malwarebytes Anti Malware avec cle de licence 2023
-Cle de licence Malwarebytes Anti Malware valide et fonctionnelle
-Cle de licence Malwarebytes Anti Malware gratuite a vie
-Code d'activation Malwarebytes Anti Malware gratuit et sans virus
-Cle de licence Malwarebytes Anti Malware pour Windows 10
-Cle de licence Malwarebytes Anti Malware pour Mac
-Cle de licence Malwarebytes Anti Malware pour Android
-Cle de licence Malwarebytes Anti Malware pour iOS
-Cle de licence Malwarebytes Anti Malware pour Linux
-Cle de licence Malwarebytes Anti Malware pour Chromebook
-Cle de licence Malwarebytes Anti Malware pour Firefox
-Cle de licence Malwarebytes Anti Malware pour Edge
-Cle de licence Malwarebytes Anti Malware pour Opera
-Cle de licence Malwarebytes Anti Malware pour Safari
-Cle de licence Malwarebytes Anti Malware pour Brave
-Cle de licence Malwarebytes Anti Malware pour Tor Browser
-Cle de licence Malwarebytes Anti Malware pour VPN
-Cle de licence Malwarebytes Anti Malware pour Ransomware Protection
-Cle de licence Malwarebytes Anti Malware pour AdwCleaner
-Cle de licence Malwarebytes Anti Malware pour Browser Guard
-Cle de licence Malwarebytes Anti Malware pour Privacy
-Cle de licence Malwarebytes Anti Malware pour Endpoint Protection
-Cle de licence Malwarebytes Anti Malware pour Endpoint Detection and Response
-Cle de licence Malwarebytes Anti Malware pour Incident Response
-Cle de licence Malwarebytes Anti Malware pour Cloud Platform
-Cle de licence Malwarebytes Anti Malware pour Nebula Platform
-Cle de licence Malwarebytes Anti Malware pour OneView Platform
-Cle de licence Malwarebytes Anti Malware pour MSP Premier Partner Program
-Comparatif des meilleurs logiciels anti malware gratuits avec cle de licence
-Avis et test complet sur le logiciel anti malware gratuit avec cle de licence
-Tutoriel et guide d'utilisation du logiciel anti malware gratuit avec cle de licence
-Astuces et conseils pour optimiser le logiciel anti malware gratuit avec cle de licence
-FAQ sur le logiciel anti malware gratuit avec cle de licence
-Forum et support technique sur le logiciel anti malware gratuit avec cle de licence
-Blog et actualites sur le logiciel anti malware gratuit avec cle de licence
-Video et demonstration sur le logiciel anti malware gratuit avec cle de licence
-Telechargement et installation du logiciel anti malware gratuit avec cle de licence
-Mise a jour et renouvellement du logiciel anti malware gratuit avec cle de licence
-Desinstallation et desactivation du logiciel anti malware gratuit avec cle de licence
-Problemes et solutions du logiciel anti malware gratuit avec cle de licence
-Avantages et inconvenients du logiciel anti malware gratuit avec cle de licence
-Alternatives et concurrents du logiciel anti malware gratuit avec cle de licence
-Promotions et reductions sur le logiciel anti malware gratuit avec cle de licence
-Garantie et remboursement sur le logiciel anti malware gratuit avec cle de licence
-Contact et service client sur le logiciel anti malware gratuit avec cle de licence
-Avis clients et temoignages sur le logiciel anti malware gratuit avec cle de licence
-
Web protection
-
Malwarebytes Anti Malware can protect your online browsing from malicious websites, ads, and downloads that may harm your device or compromise your privacy. It uses web filtering technology to block phishing sites, scam sites, fake news sites, and other dangerous sites that may try to steal your personal information or infect your device with malware. It can also prevent unwanted programs from installing on your device without your consent.
-
Privacy protection
-
Malwarebytes Anti Malware can protect your online privacy from hackers, trackers, and spies that may try to access your data or monitor your online activity. It uses VPN technology to encrypt your internet connection and hide your IP address and location from prying eyes. It also offers anti-tracking features that prevent websites from collecting your browsing history, cookies, or other data.
-
How to install and activate Malwarebytes Anti Malware with a free license key
-
Download and install Malwarebytes Anti Malware
-
To download and install Malwarebytes Anti Malware on your device, follow these steps:
-
Go to the official Malwarebytes website and download the setup file for Malwarebytes Anti Malware.
-
Run the setup file and follow the instructions to install Malwarebytes Anti Malware on your device. You may have to agree to the terms and conditions and choose a destination folder for the installation.
-
Once the installation is complete, Malwarebytes Anti Malware will launch automatically and start scanning your device for threats.
-
-
Enter the free license key
-
To activate the premium features of Malwarebytes Anti Malware with a free license key, follow these steps:
-
-
Open Malwarebytes Anti Malware and click on the "Settings" icon in the top right corner.
-
Click on the "Account" tab and then click on the "Activate License" button.
-
Enter the free license key that you obtained from one of the methods mentioned above and click on "Activate License".
-
You will see a confirmation message that your license key has been activated and your premium features have been unlocked.
-
-
Enjoy the full features of Malwarebytes Anti Malware
-
Now that you have activated the premium features of Malwarebytes Anti Malware with a free license key, you can enjoy the full benefits of this powerful and reliable cyber security software. You can scan and remove malware from your device in seconds, protect your device from ransomware attacks, block malicious websites and downloads, and secure your online privacy with VPN and anti-tracking features. You can also customize your settings, manage your devices, and access support and updates from Malwarebytes.
-
Conclusion
-
Summary of the main points
-
In this article, we have shown you how to get a free license key for Malwarebytes Anti Malware, a cyber security software that protects your device from malware, viruses, ransomware, spyware, adware, trojans, and other online threats. We have also explained what Malwarebytes Anti Malware is, why you need a license key for it, and what features it offers, and we have provided a step-by-step guide on how to download, install, and activate Malwarebytes Anti Malware with a free license key.
-
Call to action
-
If you want to protect your device from online threats and enjoy the full features of one of the best anti-malware programs on the market without paying a dime, don't hesitate to get a free license key for Malwarebytes Anti Malware today. You can use one of the methods we have suggested above or visit this link to download and install a 14-day trial version of Malwarebytes Anti Malware. You will be amazed by how fast and effective this software is at scanning and removing malware from your device. Don't wait any longer and get your free license key for Malwarebytes Anti Malware now!
-
Frequently Asked Questions
-
-
Q: Is Malwarebytes Anti Malware safe to use?
-
A: Yes, Malwarebytes Anti Malware is safe to use. It is a legitimate and reputable cyber security software that has been trusted and loved by millions of users worldwide. It does not contain any malware or viruses itself and does not harm your device or data in any way.
-
Q: Is Malwarebytes Anti Malware compatible with other antivirus software?
-
A: Yes, Malwarebytes Anti Malware is compatible with other antivirus software. It can work alongside your existing antivirus software to provide an extra layer of security. However, you may have to adjust some settings or disable some features to avoid conflicts or performance issues.
-
Q: How long does the free license key for Malwarebytes Anti Malware last?
-
A: The free license key for Malwarebytes Anti Malware lasts for different periods depending on the method you use to obtain it. If you use the trial version, it lasts for 14 days. If you use a giveaway, it may last for a few months or a year. If you use a crack, it may last indefinitely or until it is detected and blocked by Malwarebytes.
Q: How can I renew or extend my free license key for Malwarebytes Anti Malware?
-
A: You can renew or extend your free license key for Malwarebytes Anti Malware by using one of the methods we have suggested above. You can either download and install a new trial version, look for a new giveaway, or search for a new crack. However, we recommend that you buy a license key from the official website of Malwarebytes if you want to support the developers and enjoy the premium features without any hassle or risk.
-
Q: How can I contact Malwarebytes if I have any questions or issues with Malwarebytes Anti Malware?
-
A: You can contact Malwarebytes if you have any questions or issues with Malwarebytes Anti Malware by visiting their support page here. You can find answers to common questions, troubleshooting guides, user manuals, and forums. You can also submit a ticket or chat with a support agent if you need more help.
-
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDIdle Pro v5.9.8.3 (precracked) free download The ultimate tool for DVD playback enhancement.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDIdle Pro v5.9.8.3 (precracked) free download The ultimate tool for DVD playback enhancement.md
deleted file mode 100644
index 01d280ab3b08a95486656437b2670329b8bfdd23..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDIdle Pro v5.9.8.3 (precracked) free download The ultimate tool for DVD playback enhancement.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
DVDIdle Pro v5.9.8.3 (precracked) free download
-
Do you love watching DVD movies on your PC but hate the annoying region codes and copy protections that prevent you from enjoying them fully? Do you want to extend the lifetime of your DVD drive by reducing its wear and tear? If you answered yes to any of these questions, then you need DVDIdle Pro, a powerful software that combines the functionality of DVD Region-Free and DVDIdle into one efficient program.
DVDIdle Pro is a software that helps you to watch and copy any DVD movie on any DVD drive, regardless of the region code or the copy protection. It works in the background to provide a smart read-ahead cache for your DVD player, saving the DVD data that will be played soon onto your hard disk or RAM cache, and feeding it to your DVD player when needed. This way, your DVD drive can take a rest and work less, extending its lifetime by up to 800 percent.
-
DVDIdle Pro features
-
DVDIdle Pro has many features that make it a must-have software for any DVD lover. Some of these features are:
-
-
It supports region-protected (RPC2) DVD drives, and does not require any firmware modifications.
-
It works with all software DVD players, such as PowerDVD, WinDVD, etc.
-
It works with all DVD copy software, such as DVDFab, DVD X Copy, InterVideo DVD Copy, etc.
-
It removes user operation prohibitions (UOPs) from DVDs, allowing you to skip FBI warnings, trailers, etc.
-
It removes CSS (Content Scrambling System) encryption from DVDs, allowing you to make backup copies of your DVDs.
-
It removes region code enhancement (RCE) from DVDs, allowing you to watch DVDs from any region.
-
It removes Sony ARccOS protection from DVDs, allowing you to copy DVDs with bad sectors.
-
It removes Macrovision protection from DVDs, allowing you to connect your PC to your TV or projector.
-
-
DVDIdle Pro benefits
-
DVDIdle Pro has many benefits that make it worth downloading and installing on your PC. Some of these benefits are:
-
-
It saves your money by allowing you to watch and copy any DVD movie without buying a region-free DVD player or a DVD decrypter.
-
It saves your time by allowing you to skip unwanted parts of DVDs and access the main menu directly.
-
It saves your disk space by allowing you to compress DVDs to fit on a single blank disc or a USB flash drive.
-
It saves your battery power by allowing you to watch DVDs on your laptop without spinning the DVD drive constantly.
-
It saves your eyesight by allowing you to adjust the brightness and contrast of DVDs according to your preference.
-
-
How to download DVDIdle Pro v5.9.8.3 (precracked) for free?
-
If you are convinced that DVDIdle Pro is the software that you need, then you might be wondering how to download it for free. Well, it's very easy and simple. Just follow these steps:
-
Step 1: Visit the developer's website
-
The first thing you need to do is visit the developer's website, where you can find more information about DVDIdle Pro and its features. You can also read some reviews and comments from other users who have tried it.
-
Step 2: Click on the download link
-
The next thing you need to do is click on the download link that is provided on the website. This will take you to another page where you can choose between two options: Download Now or Download Mirror. Either option will work fine, so just pick one and click on it.
-
Step 3: Install and run the program
-
The last thing you need to do is install and run the program on your PC. The installation process is very simple and straightforward, just follow the instructions on the screen. The best part is that this version of DVDIdle Pro is precracked, which means that you don't need to enter any serial number or activation code. Just run the program and enjoy its full features without any limitations.
-
How to use DVDIdle Pro v5.9.8.3 (precracked)?
-
Now that you have downloaded and installed DVDIdle Pro on your PC, you might be wondering how to use it effectively. Well, it's very easy and simple as well. Just follow these steps:
-
Step 1: Launch DVDIdle Pro
-
The first thing you need to do is launch DVDIdle Pro from your desktop or start menu. You will see a small icon in your system tray that indicates that the program is running in the background.
-
How to get DVDIdle Pro v5.9.8.3 for free
-DVDIdle Pro v5.9.8.3 cracked version download link
-Best software to extend DVD drive lifetime
-DVDIdle Pro v5.9.8.3 features and benefits
-DVDIdle Pro v5.9.8.3 review and comparison
-Where to find DVDIdle Pro v5.9.8.3 precracked
-DVDIdle Pro v5.9.8.3 installation guide and tutorial
-DVDIdle Pro v5.9.8.3 license key generator
-DVDIdle Pro v5.9.8.3 alternative and similar software
-DVDIdle Pro v5.9.8.3 discount and coupon code
-DVDIdle Pro v5.9.8.3 system requirements and compatibility
-DVDIdle Pro v5.9.8.3 customer support and feedback
-DVDIdle Pro v5.9.8.3 update and upgrade
-DVDIdle Pro v5.9.8.3 pros and cons
-DVDIdle Pro v5.9.8.3 testimonials and ratings
-How to uninstall DVDIdle Pro v5.9.8.3
-DVDIdle Pro v5.9.8.3 troubleshooting and error fixing
-How to use DVDIdle Pro v5.9.8.3 with other software
-DVDIdle Pro v5.9.8.3 FAQs and tips
-How to optimize DVD playback with DVDIdle Pro v5.9.8.
-How to backup DVDs with DVDIdle Pro v5.
-How to rip DVDs with DVDIdle Pro v5.
-How to burn DVDs with DVDIdle Pro v5.
-How to copy DVDs with DVDIdle Pro v5.
-How to decrypt DVDs with DVDIdle Pro v5.
-How to compress DVDs with DVDIdle Pro v5.
-How to edit DVDs with DVDIdle Pro v5.
-How to convert DVDs with DVDIdle Pro v5.
-How to stream DVDs with DVDIdle Pro v5.
-How to watch DVDs with DVDIdle Pro v5.
-How to download DVDs with DVDIdle Pro v5.
-How to create DVDs with DVDIdle Pro v5.
-How to enhance DVDs with DVDIdle Pro v5.
-How to repair DVDs with DVDIdle Pro v5.
-How to clean DVDs with DVDIdle Pro v5.
-How to organize DVDs with DVDIdle Pro v5.
-How to protect DVDs with DVDIdle Pro v5.
-How to share DVDs with DVDIdle Pro v5.
-How to recover DVDs with DVDIdle Pro v5.
-How to erase DVDs with DVDIdle Pro v5.
-Is DVDIdle Pro v5 safe and legal?
-Is DVDIdle Pro v5 worth it?
-Is DVDIdle Pro v5 compatible with Windows 10?
-Is DVDIdle Pro v5 the best DVD software?
-Is DVDIdle Pro v5 virus-free?
-Is DVDIdle Pro v5 a scam or legit?
-Is DVDIdle Pro v5 free or paid?
-Is DVDIdle Pro v5 easy or hard to use?
-Is DVDIdle Pro v5 fast or slow?
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Spiderman Friend Or Foe Iso Pc [REPACK].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Spiderman Friend Or Foe Iso Pc [REPACK].md
deleted file mode 100644
index 06ce0fc851b446a68dd8f25694c5a119772a7bd1..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Spiderman Friend Or Foe Iso Pc [REPACK].md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-
How to Download Spiderman Friend Or Foe Iso Pc
-
If you are a fan of Spiderman and want to play a game that lets you team up with his friends and foes, you might be interested in Spiderman Friend Or Foe. This is a 2007 action game that was inspired by the Spiderman film trilogy and the classic Spiderman comics. In this game, you can control Spiderman and one of his allies or enemies, such as Doc Ock, Green Goblin, Venom, and Sandman, and fight against a global threat. You can also play co-op mode with a friend who can join the game at any time and take control of your sidekick.
Spiderman Friend Or Foe is available for Windows PC, but you will need to download an ISO file of the game disc and mount it on your computer. You will also need to install a NoDVD patch to bypass the SafeDisc DRM that does not work on Windows Vista and later. Here are the steps to download and play Spiderman Friend Or Foe Iso Pc:
Download the ISO file of Spiderman Friend Or Foe. The file size is about 2 GB.
-
Download a utility for mounting disc image files, such as WinCDEmu, UltraISO, Alcohol 52%/Alcohol 102%, or Daemon Tools Lite.
-
Install the utility and mount the ISO file of Spiderman Friend Or Foe on your computer.
-
Run the Setup.exe file from the mounted disc and install the game on your computer.
-
Download the NoDVD patch for Spiderman Friend Or Foe from My Abandonware.
-
Extract the Game.exe file from the NoDVD patch and replace the original Game.exe file in the game directory.
-
Launch the game and enjoy playing Spiderman Friend Or Foe Iso Pc.
-
-
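Step 3 (mounting the ISO) can also be scripted on Windows 8 or later, where ISO mounting is built in. The sketch below simply calls PowerShell's Mount-DiskImage cmdlet from Python; the ISO path is a placeholder and this is only an illustration, not part of the game's official setup.

```python
# Illustrative sketch of step 3: mount a downloaded ISO via the built-in
# PowerShell cmdlet Mount-DiskImage (Windows 8+). The path is a placeholder.
import subprocess

ISO_PATH = r"C:\Users\you\Downloads\spiderman_friend_or_foe.iso"  # placeholder

def mount_iso(iso_path: str) -> None:
    # PowerShell performs the actual mount; Python only invokes it.
    subprocess.run(
        ["powershell", "-NoProfile", "-Command",
         f'Mount-DiskImage -ImagePath "{iso_path}"'],
        check=True,
    )

if __name__ == "__main__":
    mount_iso(ISO_PATH)
```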
If you have any problems running the game, you can try changing the compatibility mode and running it as administrator. You can also install DirectX 9 from the disc if needed. You can find more information about the game on PCSX2 Wiki or MobyGames.
-
-
Spiderman Friend Or Foe is a game that offers a lot of fun and variety for Spiderman fans. You can choose from 13 different characters to play as your sidekick, each with their own unique abilities and combos. You can also switch between them at any time during the game. You can unlock more characters by defeating them in boss battles or by collecting meteor shards that are scattered around the levels.
-
-
The game has a total of 18 levels that are set in different locations around the world, such as Egypt, Tokyo, Nepal, Transylvania, and New York. Each level has its own enemies, puzzles, and secrets to discover. You can also replay any level you have completed to find more collectibles and improve your score. The game also has a challenge mode where you can test your skills against waves of enemies and bosses.
-
The game has a simple and intuitive control scheme that makes it easy to play. You can use the keyboard and mouse or a gamepad to control your character. You can also play co-op mode with a friend on the same PC by using split-screen or LAN connection. The co-op mode allows you to work together and perform team combos to defeat your foes.
-
Spiderman Friend Or Foe is a game that will appeal to Spiderman fans of all ages. It has a colorful and comic-like graphics style that matches the tone of the game. It also has a humorous and original story that features voice acting from some of the actors from the Spiderman movies, such as Tobey Maguire, James Franco, and Thomas Haden Church. The game also has a lot of references and easter eggs to the Spiderman comics and movies that fans will appreciate.
-
-All you have to do is install this application on your device, which is very easy to install. You will be able to easily hack the alertpay account with ease, and without having to make the effort of getting access to any of alertpay. The application is a great success and it has a rating of 4.7 out of 5 on Google Play. It has, before you decide to go with this software, go through a few demos and read how to use the application very clearly. This hack will allow you to receive your money very fast as long as you have alertpay, paypal or any other online payment system. The application is 100% safe and does not carry any virus or malware in it. Your data will be kept secure as long as you do not use an infected device. The data you will be sharing with this hack will be completely private and no other person can access it. You do not have to worry about your credit card information as it is secured and encrypted by default. The application will be very simple to use, with only 3 steps you can use this hack on your device. The application will be sent as a apk file to your device when you have to receive your money on alertpay account. Don’t wait anymore and download the hack to get your money very fast.
-
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent.md b/spaces/1gistliPinn/ChatGPT4/Examples/Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent.md
deleted file mode 100644
index 1c9ec8061a6abe54023f73613e5cf6091c1f4ccf..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
ipod lorem ipsum dolor. Is not very pretty with black vinyl. ipod iphone voltaren lavage. ipod iphone vaporware. alvin i vjeverice sinkronizirano na hrvatski torrent 3D.SK Gdje mogu prodati 3d simulacije!. alvin i vjeverice sinkronizirano na hrvatski 2. alvin i vjeverice sinkronizirano na hrvatski 3. alvin i vjeverice sinkronizirano na hrvatski 4. 5. alvin i vjeverice sinkronizirano na hrvatski 6. alvin i vjeverice sinkronizirano na hrvatski 7. alvin i vjeverice sinkronizirano na hrvatski 8.
-
alvin i vjeverice 2 sinkronizirano na hr torrent fkk naturist boys 12 14yo in the camping alvin i vjeverice 2 sinkronizirano na hr torrent 3D.SK Human Photo. on 2016-Jan-27 07:02:12. alvin i vjeverice 2 sinkronizirano na hrvatski Najvei popis web lokacija za prijavu u Velikoj. All help you need!.
alvin i vjeverice 2 sinkronizirano na hr torrent 4. alvin i vjeverice 2 sinkronizirano na hrvatski 5. alvin i vjeverice 2 sinkronizirano na hrvatski 6. alvin i vjeverice 2 sinkronizirano na hrvatski 7. alvin i vjeverice 2 sinkronizirano na hrvatski 8. alvin i vjeverice 2 sinkronizirano na hrvatski 9. alvin i vjeverice 2 sinkronizirano na hrvatski 10. alvin i vjeverice 2 sinkronizirano na hrvatski 11. alvin i vjeverice 2 sinkronizirano na hrvatski 12. alvin i vjeverice 2 sinkronizirano na hrvatski 13. alvin i vjeverice 2 sinkronizirano na hrvatski 14. alvin i vjeverice 2 sinkronizirano na hrvatski 15. alvin i vjeverice 2 sinkronizirano na hrvatski 16. alvin i vjeverice 2 sinkronizirano na hrvatski 17. alvin i vjeverice 2 sinkronizirano na hrvatski 18.
-
Sonet non sono pornografici. via!!. Un dvd dei film festivi cinematografici in attesa di ricevere un regolare la sala da cinema dove sono stati proiettati. Alla memoria..com/simbolizzazione/alvin-i-vjeverice-2-sinkronizirano-na-hr-torrent/.alvin-i-vjeverice-2-sinkronizirano-na-hr-torrent/. by. L. download. 538a28228e, Alvin I Vjeverice 3 Sinkronizirano Na Hr Torrent 2.28.18 Hr. HD. Alvin I. Vjeverice. Good quality movie Alvin I Vjeverice 2 Sinkronizirano Na Hr Torrent 3D 1080p. Vjeverice 3 Sinkronizirano Na Hr Torrent 2. Watch Alvin I Vjeverice 3 Sinkronizirano Na Hr Torrent 2.18 Hr HD Movie Online Free Download. Alvin.i.Vjeverice.3.> Alvin.> alvin i vjeverice 2 sinkronizirano na hr torrent
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Avatar The Last Cockbender Full Version __FULL__.md b/spaces/1gistliPinn/ChatGPT4/Examples/Avatar The Last Cockbender Full Version __FULL__.md
deleted file mode 100644
index 97dec3db7b501809765c7bca9238dafaeebef38d..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Avatar The Last Cockbender Full Version __FULL__.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Descarga wifislax 4.3 torrent todo lo que necesitas saber sobre esta versin de Wifislax.md b/spaces/1gistliPinn/ChatGPT4/Examples/Descarga wifislax 4.3 torrent todo lo que necesitas saber sobre esta versin de Wifislax.md
deleted file mode 100644
index f802e8388c61eeaf75c732ccf19076b72aff684c..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Descarga wifislax 4.3 torrent todo lo que necesitas saber sobre esta versin de Wifislax.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
Esta version 3.2 de wifislax64 se ha centrado en intentar optimizar recursos para que los consumos del sistema sean lo mas bajo posibles sin causar impactos graves en el rendimiento cuando estamos en modo live.
Si quieres tener la mejor experiencia posible de navegación y uso de la web activa y acepta nuestras políticas de privacidad y cookies. En caso contrario te recomendamos abandonar la web y buscar la información en otro sitio. Si quieres registrarte para comentar las entradas de www.wifislax.com deberas aceptar las políticas de privacidad y las condiciones de uso.
-
Distro para Auditorías de seguridad en redes WiFi, una de las mejores distribuciones para ello es Wifislax. Esta distribución está basada en Slackware64 15 e incorpora todas las herramientas necesarias instaladas de forma predeterminada para realizar diferentes ataques a las redes WiFi, independientemente de si utilizan cifrado WEP, WPA o WPA2, además, también incluye todas las herramientas para hackear una red WiFi usando el protocolo WPS (Wi-Fi Protected Setup) de los routers. Ya está disponible la última versión WifiSlax 3.0 con muchos cambios, y la puedes descargar de forma totalmente gratuita.
-
Se an publicado tantas iso de wifislax y vete tu a saber quien las modifica que muchas están corruptas o sencillamente no van bien.pienso que la aventura wifislax terminó y está más que abandonada, pero fue divertido mientras duro. Ahora cual es la mejor 4.12 o 3.0 ya que ninguna se actualiza ¿funcionan los servidores o también abandonados o vendidos a un mejor postor tendríamos que preguntar al número 1 de wifislax al de la idea.pasate a Kali instala en disco duro y se te olvidará que wifislax existió buenas noches hackers
-
el 3.0 no arranca menuda perdida de tiempo ahora me bajo 4.11.1 y como no arranque me olvidare para siempre a wifislax asta que lo reparen profesionales no gente que se piensan un hacke por que han visto tutoriales
-
Todas las versiones de Wifislax son creadas por USUARIONUEVO del foro de seguridadwireless. Sus servidores de descargas ni idea, los de elhacler.net alojados en ns2.elhacker.net funcionan sin publicidad
-
-
A día de hoy el uso de las redes P2P y los clientes torrent es algo habitual en cualquier entorno, tanto empresarial como a nivel particular. Y es que son de gran utilidad cuando descargamos o compartimos archivos y queremos hacerlo de una forma fiable y segura, sobre todo cuando son archivos de gran tamaño. Para ello se hace uso del protocolo Bittorrent y algún cliente o programa que nos ofrezca una interfaz para podernos manejar es estas redes y trabajar de forma cómoda y sencilla con los ficheros torrent. En este caso, uno de los mejores programas es qBittorrent, por eso, vamos a mostrar a continuación cómo instalar la herramienta en nuestro equipo y cómo configurarla para descargar archivos con ella.
-
Cuando nos disponemos a buscar algún contenido concreto en torrent, son muchos los que suelen acudir a algunas de las muchas webs que podemos encontrar para descargar este tipo de contenidos. Sin embargo, estos sitios suelen ser populares por la cantidad de malware que contienen, problemas de acceso, caídas frecuentes, etc. De ahí, que lo mejor es echar mano de un cliente como qBittorrent, que, además, en este caso nos permite buscar torrents sin tener que acudir a ninguna página web.
-
Para comenzar a usar el cliente, lo primero que tenemos que hacer es proceder con su descarga e instalación en nuestro ordenador. Por suerte qBittorrent es un software multiplataforma de código abierto y gratuito que podemos descargar desde su página web oficial. Por lo tanto, lo único que tenemos que hacer para descargar la herramienta es seguir el enlace anterior y elegir la versión del programa que se adapte a nuestro equipo en cuanto a sistema operativo y arquitectura utilizada.
-
Una vez hecho esto, se nos descargará en nuestro disco duro un archivo .exe, por lo que, lo único que tendremos que hacer para su instalación es doble clic sobre él. El proceso es muy muy sencillo, ya que únicamente tendremos que elegir el idioma de instalación y la ruta donde queremos instalarlo y en tan solo unos instantes veremos cómo la herramienta está lista para empezar a usarla.
-
Lanzamos qBittorrent en nuestro equipo y veremos cómo cuenta con una interfaz muy limpia y con casi todas las opciones más importantes a mano. Y decimos «casi», porque lo cierto es que, aunque el cliente incluye su propio motor de búsqueda, éste no viene activado por defecto. Por lo tanto, lo primero que debemos hacer es activarlo.
-
El proceso puede tardar un poco, pero rápidamente veremos como en la ventana emergente se muestran un montón de trackers de torrents que podremos utilizar para buscar contenidos. Pulsamos en Aceptar para guardar los cambios y cerrar la ventana y ya tendremos todo listo para comenzar a usar el motor de búsqueda de qBittorrent.
-
Desde las opciones de configuración de la herramienta también podemos encontrar otros ajustes interesantes para configurar qBittorrent a nuestro gusto. Para ello, únicamente tenemos que hacer clic sobre el icono del engranaje que encontramos en la parte superior. Esto nos abrirá una ventana en la que tendremos un panel izquierdo donde aparecen las opciones del menú.
-
Entre ellas cabe destacar la opción Descargas, donde podremos configurar, entre otras cosas, la carpeta donde queremos que se nos guarden todos los archivos descargados desde qBittorrent.
-
Dentro de la opción BitTorrent podemos configurar las descargas y las subidas activas, es decir, el número de descargas y subidas de archivos que queremos que estén activas como máximo al mismo tiempo.
-
Lo ideal será hacer clic sobre la cabecera de la columna de los resultados, Semillas, para que los resultados se ordenen de manera descendente por Semillas y así obtener mejores velocidades de descarga, En el propio listado podremos ver también el tamaño del archivo y el motor de búsqueda en el que se ha encontrado.
-
Una vez que hemos elegido el archivo a descargar, hacemos doble clic sobre él y esto nos abrirá una nueva ventana donde podremos indicar la carpeta donde queremos que se guarde el archivo descargado. Pulsamos en Aceptar y automáticamente comenzará el proceso de descarga.
-
En ese mismo instante, en la pestaña Transferencias ya podremos ver la información sobre la descarga del archivo, porcentaje de progreso, semillas, estado de la descarga, pares, velocidad de bajada y subida, tiempo restante aproximado, etc.
-
Además de la búsqueda y descarga de archivos desde el propio motor de búsqueda de qBittorrent, la herramienta nos ofrece la posibilidad de utilizar archivos torrent y enlaces magnet. Lo cierto es que el proceso es similar, pero en este caso, lo primero que tendremos que hacer es buscar en una página de archivos torrent el fichero que queremos descargar.
-
Una vez encontrado y siempre asegurándonos que es un sitio y archivo de confianza, descargamos el archivo .torrent en nuestro ordenador. Ahora, lo siguiente que debemos hacer es asociar la apertura de archivos de este tipo con qBittorrent, ya que, de esta manera, lo único que tendremos que hacer para que comience a descargarse es hacer doble clic sobre el archivo .torrent.
-
Si no es así o no queremos asociar la apertura de archivos de este tipo con el cliente, entonces tendremos que añadir el archivo de forma manual desde el propio programa. Para ello, hacemos clic sobre la opción de menú Archivo y a continuación, seleccionamos la opción Añadir archivo torrent. Seleccionamos el archivo que acabamos de descargar, aceptamos el mensaje que se nos muestra para añadir el nuevo torrent y comenzará el proceso de descarga de forma automática.
-
Y si lo que queremos es descargar archivos a través de un enlace magnet, qBittorrent también nos da esa opción. Lo único que tenemos que hacer es ir a la opción Archivo > Añadir enlace torrent y copiar el enlace magnet en el cuadro de texto de la ventana que se nos abre a continuación. Por último, pulsamos en Descargar y el proceso de descarga comenzará automáticamente.
-
Aunque la aplicación funcione correctamente, lo cierto es que en un momento determinado nos podemos encontrar con que los archivos no se descargan o lo hacen a una velocidad muy lenta. En este caso, hay varias cosas que debemos revisar para tratar de encontrar la causa del problema y la solución.
-
Una de las causas de que no se realicen las descargas es que el firewall de Windows o de cualquier otra herramienta de seguridad, esté bloqueando las descargas a través de qBittorrent. Por lo tanto, podemos probar a desactivar de manera temporal la herramienta de seguridad y comprobar si de esta manera las descargas se realizan con normalidad.
-
Otro aspecto para revisar son los puertos de nuestro router, para verificar que todos los necesarios para las conexiones de qBittorrent está abiertos y correctamente redirigidos al cliente. Es importante también revisar que el protocolo UPnP de nuestro router esté correctamente activado, puesto que nos ayudará a resolver ciertos problemas de conexión.
-
También es recomendable hacer un análisis en busca de cualquier tipo de virus y malware a nuestro equipo, para evitar que cualquier tipo de amenaza esté usando la red para otros menesteres o simplemente esté usando los recursos de nuestro equipo y no deje que se dediquen a las descargas a través de qBittorrent.
-
Por último, pero no menos importante, debemos asegurarnos de que los archivos que estamos intentando descargar tienen suficientes semillas para que la descarga sea lo más rápida y fluida posible. Aunque hayas encontrado el torrent que a tu parecer da la sensación de ser perfecto, si no tiene suficientes semillas no habrá manera de hacer la descarga a una velocidad digna. Por ello, no te ofusques, busca una alternativa y seguro que la encuentras en menos tiempo del que piensas. Normalmente archivos antiguos o que no estén de moda suelen ser más complicados de descargar, pero sigue intentándolo y acabarás encontrando una solución.
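Besides the graphical workflow described above, qBittorrent can also be driven from a script once its Web UI is enabled in the program's options, which exposes a Web API. The sketch below uses the third-party qbittorrent-api Python package and is purely illustrative; the host, port, credentials, magnet link and save path are placeholders, not values taken from this article.

```python
# Illustrative sketch: add a torrent and list transfers through qBittorrent's
# Web API using the third-party "qbittorrent-api" package. All connection
# details, the magnet link and the save path below are placeholders.
import qbittorrentapi

client = qbittorrentapi.Client(
    host="localhost",
    port=8080,
    username="admin",
    password="adminadmin",
)
client.auth_log_in()  # raises qbittorrentapi.LoginFailed on bad credentials

# Equivalent of adding a magnet link and choosing a download folder in the GUI.
client.torrents_add(
    urls=["magnet:?xt=urn:btih:0000000000000000000000000000000000000000"],  # placeholder
    save_path=r"C:\Downloads\torrents",
)

# Rough equivalent of the transfers view: name, state and progress of each torrent.
for torrent in client.torrents_info():
    print(torrent.name, torrent.state, f"{torrent.progress:.0%}")
```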
free netflix download premium 9.2 download pc app is one of the most popular vpn apps available. with this app, you can access your favorite content on pcs, laptops, chromebooks, and macs that support windows, fire os, chrome, and android. you can also use this app to protect your pc from computer viruses, hackers, ddos (distributed denial of service attacks), and more.
free netflix download premium 9.2 download has a free version that lets you access videos and shows. the premium version, however, allows you to stream up to 4k quality at a faster speed than a free version. with this app, youll get the most popular series, including the walking dead, house of cards, and more. there are so many great series available, so if youre a fan, this is definitely the one for you.
-
free netflix download premium 9.2 download comes with a hotspot shield pro license. if youre an android user, you should also be a hotspot shield premium user. it has paid apps available for both apple and android devices. its super easy to download and use, and best vpn application for every operating system. free version is perfect for users who want to bypass national censorship.
-
using the app, users can access their favourite programming all in one location, meaning that there is no more need to search for the right program on the web. if a user prefers to keep up with the news, this is one of the best places to do so with netflix. netflix can be the perfect companion for your screen. the streaming giant has introduced many recent features, including 4k support and the option to watch live tv shows and episodes. to satisfy the need for streaming content, free netflix download premium serial comes with many quality features. aside from streaming, netflix has a vast library of programming. you can even see all of your favourite shows as soon as they come out, allowing you to watch your favourite shows whenever you want.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/12 Locks II A Puzzle Game with 3 Different Rooms and 12 Locks Each - APK Download.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/12 Locks II A Puzzle Game with 3 Different Rooms and 12 Locks Each - APK Download.md
deleted file mode 100644
index 2345c5dbb4ec7f70c6f99647adbaec24647be6e6..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/12 Locks II A Puzzle Game with 3 Different Rooms and 12 Locks Each - APK Download.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
12 Locks II: A Fun and Challenging Puzzle Game for Android
-
If you are looking for a puzzle game that will test your logic, creativity and patience, you might want to try 12 Locks II. This is a sequel to the popular 12 Locks game by RUD Present, a developer that specializes in creating unique and colorful games with plasticine graphics. In this article, we will tell you everything you need to know about 12 Locks II, including what it is, how to play it, why you should download it and how to download it.
-
What is 12 Locks II?
-
12 Locks II is a puzzle game that challenges you to find all the keys to unlock 12 doors in different rooms. Each room has its own theme and style, such as a kitchen, a bathroom, a spaceship, a pirate ship and more. You will have to explore the rooms, interact with various objects, solve mini-puzzles and collect clues to find the keys.
The game follows the adventures of a plasticine man who has a bad habit of locking every door with 12 locks. As a result, he constantly finds himself in awkward situations: he might get stuck in a toilet, a fridge or a washing machine. Your task is to help him escape from these predicaments by unlocking the doors.
-
The features of the game
-
Some of the features that make 12 Locks II stand out are:
-
-
The game has colorful and detailed graphics made with plasticine. The rooms are full of funny and quirky details that add to the charm and humor of the game.
-
The game has cheerful and catchy music that matches the mood of each room. The sound effects are also realistic and amusing.
-
The game has simple and intuitive controls. You just need to tap on the screen to interact with objects and drag items to use them.
-
The game has challenging and varied puzzles that require logic, creativity and attention to detail. Some puzzles are easy and straightforward, while others are more complex and tricky. You will have to use your brain and your imagination to solve them.
-
The game has no time limit or penalties. You can play at your own pace and enjoy the process of finding solutions. You can also use hints if you get stuck.
-
-
How to play 12 Locks II?
-
Playing 12 Locks II is easy and fun. Here are some tips on how to play it:
-
The controls of the game
-
To play 12 Locks II, you just need to tap on the screen to interact with objects and drag items to use them. You can also zoom in or out by pinching the screen. To move between rooms, you can swipe left or right on the screen. To access the inventory or the menu, you can tap on the icons at the bottom of the screen.
-
The tips and tricks of the game
-
To solve the puzzles in 12 Locks II, you need to pay attention to everything in the rooms. Here are some tips and tricks that might help you:
-
12 locks 2 apk download
-12 locks 2 apk mod
-12 locks 2 apk free
-12 locks 2 apk latest version
-12 locks 2 apk android
-12 locks 2 apk full
-12 locks 2 apk offline
-12 locks 2 apk unlimited
-12 locks 2 apk puzzle game
-12 locks 2 apk for pc
-12 locks 2 apk online
-12 locks 2 apk hack
-12 locks 2 apk update
-12 locks 2 apk old version
-12 locks 2 apk no ads
-12 locks 2 apk premium
-12 locks 2 apk cheats
-12 locks 2 apk review
-12 locks 2 apk walkthrough
-12 locks 2 apk tips
-12 locks 2 apk guide
-12 locks 2 apk solutions
-12 locks 2 apk hints
-12 locks 2 apk levels
-12 locks 2 apk gameplay
-12 locks 2 apk trailer
-12 locks 2 apk video
-12 locks 2 apk screenshots
-12 locks 2 apk features
-12 locks 2 apk requirements
-12 locks 2 apk size
-12 locks 2 apk rating
-12 locks 2 apk feedback
-12 locks 2 apk comments
-12 locks II android game free download
-how to play the game of the year - the best puzzle game ever - the game that will blow your mind - the game that will make you smarter - the game that will challenge you - the game that will test your logic - the game that will keep you entertained - the game that will make you laugh - the game that will make you think - the game that will make you happy
-
-
Look for clues and hints in the environment. For example, you might find codes, symbols, colors, shapes, patterns, directions, numbers or words that can help you unlock the locks.
-
Use logic and common sense to figure out the connections between the clues and the locks. For example, you might have to match colors, shapes, numbers or words to the corresponding locks.
-
Use trial and error to test your hypotheses. For example, you might have to try different combinations of codes, symbols or directions to find the right one.
-
Use creativity and imagination to think outside the box. For example, you might have to use unconventional methods or items to solve some puzzles.
-
Don't be afraid to experiment and explore. For example, you might have to tap on everything, move objects around, combine items or use items in unexpected ways.
-
-
Why should you download 12 Locks II?
-
12 Locks II is a game that will keep you entertained and challenged for hours. Here are some reasons why you should download it:
-
The benefits of playing 12 Locks II
-
Playing 12 Locks II can have many benefits for your brain and your mood. Some of them are:
-
-
It can improve your cognitive skills, such as memory, attention, concentration, logic, problem-solving and creativity.
-
It can stimulate your curiosity and imagination, as you discover new rooms and puzzles.
-
It can provide you with a sense of achievement and satisfaction, as you unlock the doors and progress in the game.
-
It can reduce your stress and anxiety, as you focus on the game and forget about your worries.
-
It can make you laugh and smile, as you enjoy the humor and fun of the game.
-
-
The reviews and ratings of the game
-
12 Locks II is a game that has received positive reviews and ratings from players and critics alike. Some of them are:
-
-
| Name | Rating | Review |
| --- | --- | --- |
| Google Play Store | 4.5/5 stars | "This game is awesome! It's challenging but not frustrating. The graphics are cute and funny. The music is catchy. I love it!" |
| App Store | 4.7/5 stars | "This game is amazing! It's so creative and original. The puzzles are clever and fun. The rooms are colorful and detailed. I recommend it!" |
| New Scientist | 8/10 points | "This game is a delight! It's a perfect blend of logic, creativity and humor. The plasticine graphics are charming and quirky. The puzzles are varied and engaging." |
| The Sun | 9/10 points | "This game is a blast! It's a great way to kill time and exercise your brain. The rooms are full of surprises and jokes. The puzzles are challenging but fair." |
-
-
How to download 12 Locks II?
-
If you are interested in playing 12 Locks II, you can download it easily from your preferred app store. Here are some steps on how to do it:
-
The requirements of the game
-
To play 12 Locks II, you need to have an Android or iOS device that meets the following requirements:
-
-
Android: version 4.4 or higher; 40 MB of free space; internet connection (optional)
-
iOS: version 10.0 or higher; 64 MB of free space; internet connection (optional)
-
-
The steps to download the game
-
To download 12 Locks II from your app store, you need to follow these steps:
-
-
Open your app store (Google Play Store or App Store) on your device.
-
Type "12 Locks II" in the search bar and tap on the game icon.
-
Tap on the "Install" or "Get" button and wait for the download to finish.
-
Tap on the "Open" or "Play" button and enjoy the game!
-
-
Conclusion
-
In conclusion, 12 Locks II is a fun and challenging puzzle game that will test your logic, creativity and patience. You will have to find all the keys to unlock 12 doors in different rooms with different themes and styles. You will have to explore the rooms, interact with objects, solve mini-puzzles and collect clues to find the keys. The game has colorful and detailed graphics made with plasticine, cheerful and catchy music, simple and intuitive controls, challenging and varied puzzles, no time limit or penalties, and hints if you get stuck. Playing 12 Locks II can improve your cognitive skills, stimulate your curiosity and imagination, provide you with a sense of achievement and satisfaction, reduce your stress and anxiety, and make you laugh and smile. You can download 12 Locks II from your preferred app store by following some simple steps. If you are looking for a puzzle game that will keep you entertained and challenged for hours, you should give 12 Locks II a try!
-
FAQs
-
Here are some frequently asked questions about 12 Locks II:
-
-
Q: How many levels are there in 12 Locks II?
-
A: There are 12 levels in 12 Locks II, each with a different room and theme.
-
Q: How long does it take to finish the game?
-
A: It depends on your skill and speed, but it can take anywhere from a few hours to a few days to finish the game.
-
Q: Is the game suitable for children?
-
A: Yes, the game is suitable for children of all ages. The game has no violence, gore or inappropriate content. The game is also educational and fun.
-
Q: Is the game free to play?
-
A: Yes, the game is free to play. However, the game contains ads that can be removed by purchasing the premium version of the game.
-
Q: Can I play the game offline?
-
A: Yes, you can play the game offline. However, you will need an internet connection to download the game and access some features such as hints or updates.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Cricket League Full Mod APK Everything You Need to Know.md b/spaces/1phancelerku/anime-remove-background/Cricket League Full Mod APK Everything You Need to Know.md
deleted file mode 100644
index 477bc0da8572f3f3c7889a1fcf94e65d2654f8c6..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Cricket League Full Mod APK Everything You Need to Know.md
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
Cricket League Full Mod APK Download: A Guide for Cricket Fans
-
If you are a fan of cricket, you might have heard of Cricket League, a popular mobile game developed by Miniclip. Cricket League is a realistic and immersive cricket simulation game that lets you play as your favorite teams and players in various modes and tournaments. You can customize your team, choose your batting and bowling style, and compete with other players online.
-
However, if you want to enjoy the game to the fullest, you might want to download the mod apk version of Cricket League. A mod apk is a modified version of an original app that gives you access to features that are not available in the official version. In this article, we will tell you everything you need to know about Cricket League Full Mod APK, including its features, how to download and install it, and its pros and cons.
Cricket League Full Mod APK is a hacked version of Cricket League that gives you unlimited coins and gems, which are the in-game currencies. You can use these coins and gems to unlock all the players, modes, stadiums, and equipment in the game. You can also customize your players' appearance, skills, and attributes to suit your preferences.
-
Another feature of Cricket League Full Mod APK is that it removes all the ads that might interrupt your gameplay. You can enjoy the game without any distractions or interruptions. Moreover, Cricket League Full Mod APK allows you to play all the modes that are available in the game, such as Quick Match, World Cup, T20 Blast, Super Over, and more. You can also play online with other players who have the mod apk version.
-
Cricket League Full Mod APK is also easy to install and does not require root access or any other permissions. You just need to download the mod apk file from a trusted source and follow some simple steps to install it on your device.
-
How to Download and Install Cricket League Full Mod APK
-
If you want to download and install Cricket League Full Mod APK on your device, here are the steps you need to follow:
-
-
Enable unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.
-
Download the mod apk file from a trusted source. You can search for "Cricket League Full Mod APK" on Google or use this link to download it directly.
-
Locate and install the mod apk file. Once you have downloaded the file, go to your file manager and find it. Tap on it and follow the instructions to install it on your device. (A signature-check sketch is given after these steps.)
-
Launch the game and enjoy. After installing the mod apk file, you can launch the game from your app drawer or home screen. You will see that you have unlimited coins and gems and all the features unlocked in the game.
-
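If you do sideload an APK, one extra check beyond an antivirus scan is to look at who signed it. The sketch below is only an illustration, not an official tool for the game: it shells out to apksigner from the Android SDK build-tools, assumes apksigner is on your PATH, and uses a placeholder file name.

```python
# Sketch: print the signing certificate of a downloaded APK before sideloading it.
# Assumes the Android SDK build-tools are installed and `apksigner` is on PATH;
# the file name is only a placeholder.
import subprocess
import sys

def print_apk_certs(apk_path: str) -> None:
    """Run `apksigner verify --print-certs` and print the signer details."""
    result = subprocess.run(
        ["apksigner", "verify", "--print-certs", apk_path],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print("Signature verification failed:", result.stderr.strip())
        sys.exit(1)
    # A repackaged ("mod") APK cannot carry the original publisher's certificate,
    # so the digests printed here will differ from those of the official release.
    print(result.stdout)

if __name__ == "__main__":
    print_apk_certs("cricket_league_mod.apk")  # placeholder file name
```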
-
Pros and Cons of Cricket League Full Mod APK
-
Cricket League Full Mod APK has many advantages, but it also has some drawbacks. Here are some of them:
-
Pros
-
-
More fun. With unlimited coins and gems, you can unlock all the players, modes, stadiums, and equipment in the game. You can also customize your players' appearance, skills, and attributes to suit your preferences. You can also play online with other players who have the mod apk version. This makes the game more fun and exciting.
-
More options. With Cricket League Full Mod APK, you can play all the modes that are available in the game, such as Quick Match, World Cup, T20 Blast, Super Over, and more. You can also choose from different teams and players from around the world. You can also switch between batting and bowling at any time.
-
More customization. With Cricket League Full Mod APK, you can customize your team, choose your batting and bowling style, and adjust the difficulty level of the game. You can also change the camera angle, the pitch condition, and the weather in the game. You can also create your own tournaments and leagues with your own rules and settings.
-
-
Cons
-
-
Risk of malware. Downloading and installing mod apk files from unknown sources can expose your device to malware and viruses that can harm your device or steal your personal information. You should always scan the mod apk file before installing it and use a reliable antivirus app on your device.
-
Ban from official servers. Using mod apk files can violate the terms and conditions of the original app and result in a ban from the official servers. You might not be able to play online with other players who have the official version of the game or access the updates and features that are released by the developers.
-
Compatibility issues. Mod apk files might not be compatible with all devices or versions of Android. You might experience crashes, glitches, or errors while playing the game or installing the mod apk file. You should always check the compatibility of the mod apk file before downloading and installing it.
-
-
Conclusion and FAQs
-
Cricket League is a great game for cricket fans who want to experience the thrill and excitement of playing cricket on their mobile devices. However, if you want to unlock all the features and enjoy the game to the fullest, you might want to download Cricket League Full Mod APK, which gives you unlimited coins and gems, all players and modes unlocked, no ads, and easy installation.
-
However, you should also be aware of the risks and drawbacks of using mod apk files, such as malware, ban from official servers, and compatibility issues. You should always download mod apk files from trusted sources and scan them before installing them. You should also backup your data before using mod apk files and uninstall them if you encounter any problems.
-
cricket league mod apk unlimited money
-cricket league hack apk free download
-cricket league premium apk unlocked
-cricket league pro mod apk latest version
-cricket league 2023 mod apk download
-cricket league game mod apk android 1
-cricket league online mod apk no ads
-cricket league 3d mod apk revdl
-cricket league fantasy mod apk unlimited gems
-cricket league manager mod apk download
-cricket league world cup mod apk offline
-cricket league simulator mod apk rexdl
-cricket league tournament mod apk unlimited coins
-cricket league champions mod apk download
-cricket league 2022 mod apk android
-cricket league ultimate mod apk no root
-cricket league real mod apk hack
-cricket league super mod apk online
-cricket league 2021 mod apk free
-cricket league mega mod apk obb
-cricket league vip mod apk download
-cricket league fun mod apk unlimited everything
-cricket league master mod apk latest
-cricket league 2020 mod apk update
-cricket league best mod apk download
-cricket league action mod apk no verification
-cricket league dream mod apk unlimited players
-cricket league star mod apk download
-cricket league epic mod apk android oyun club
-cricket league legend mod apk free download
-cricket league adventure mod apk unlimited tickets
-cricket league hero mod apk download
-cricket league classic mod apk android republic
-cricket league amazing mod apk unlimited lives
-cricket league blast mod apk download
-cricket league power mod apk unlimited energy
-cricket league fever mod apk download
-cricket league battle mod apk android zone
-cricket league glory mod apk unlimited gold
-cricket league challenge mod apk download
-cricket league strike mod apk unlimited balls
-cricket league rush mod apk download
-cricket league blitz mod apk unlimited boosters
-cricket league thrill mod apk download
-cricket league storm mod apk unlimited cash
-cricket league smash mod apk download
-cricket league spark mod apk unlimited diamonds
-cricket league firework mod apk download
-cricket league boom mod apk unlimited keys
-
We hope this article has helped you understand everything you need to know about Cricket League Full Mod APK. If you have any questions or feedback, please feel free to leave a comment below. Here are some FAQs that might answer some of your queries:
-
FAQs
-
-
What is Cricket League?
-
Cricket League is a realistic and immersive cricket simulation game developed by Miniclip. It lets you play as your favorite teams and players in various modes and tournaments. You can customize your team, choose your batting and bowling style, and compete with other players online.
-
What is Cricket League Full Mod APK?
-
Cricket League Full Mod APK is a hacked version of Cricket League that gives you unlimited coins and gems, which are the in-game currencies. You can use these coins and gems to unlock all the players, modes, stadiums, and equipment in the game. You can also customize your players' appearance, skills, and attributes to suit your preferences.
-
How to download Cricket League Full Mod APK?
-
To download Cricket League Full Mod APK, you need to enable unknown sources on your device, download the mod apk file from a trusted source, locate and install it on your device, and launch the game.
-
What are the pros and cons of Cricket League Full Mod APK?
-
The pros of Cricket League Full Mod APK are more fun, more options, and more customization. The cons are the risk of malware, a possible ban from the official servers, and compatibility issues.
-
Is Cricket League Full Mod APK safe to use?
-
Cricket League Full Mod APK is not completely safe to use as it can expose your device to malware and viruses that can harm your device or steal your personal information. It can also violate the terms and conditions of the original app and result in a ban from the official servers. It can also cause crashes, glitches, or errors on your device. You should always scan the mod apk file before installing it and use a reliable antivirus app on your device. You should also backup your data before using mod apk files and uninstall them if you encounter any problems.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Airtel Payment Bank App and Enjoy Online Banking Services.md b/spaces/1phancelerku/anime-remove-background/Download Airtel Payment Bank App and Enjoy Online Banking Services.md
deleted file mode 100644
index b470f5ef2a9abd1df090df23b49cbff4bf5608a0..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Airtel Payment Bank App and Enjoy Online Banking Services.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
How to Download Airtel Payment Bank
-
If you are looking for a convenient and secure way to manage your money, pay bills, shop online, and more, then you should consider downloading Airtel Payment Bank. Airtel Payment Bank is a digital banking platform that offers you a range of benefits and features that make your life easier. In this article, we will show you how to download Airtel Payment Bank app and online debit card, and how to use them for various transactions. Let's get started!
Airtel Payment Bank is a payment bank that operates under the license of the Reserve Bank of India (RBI). It is a subsidiary of Bharti Airtel, one of the leading telecom operators in India. Airtel Payment Bank allows you to open a savings account with up to Rs. 1 lakh balance, and get an interest rate of 4% per annum. You can also get a free online debit card that you can use for online payments, shopping, and ATM withdrawals. Some of the benefits of Airtel Payment Bank are:
-
-
You can access your account anytime, anywhere through the Airtel Thanks app or the website.
-
You can get cashback and discounts on various transactions such as recharges, bill payments, movie tickets, etc.
-
You can transfer money to any bank account or mobile number using UPI, IMPS, or NEFT.
-
You can withdraw cash from any of the over 5 lakh banking points across India.
-
You can enjoy zero balance and zero maintenance charges on your account.
-
-
How to Download Airtel Payment Bank App?
-
To download Airtel Payment Bank app, you need to follow these simple steps:
-
-
Visit the official website of Airtel Payment Bank or go to the app store of your device (Google Play Store or Apple App Store) and search for "Airtel Thanks".
-
Enter your mobile number and click on "Get OTP". You will receive a one-time password (OTP) on your phone.
-
Enter the OTP and click on "Verify". You will be redirected to the app download page.
-
Click on "Install" and wait for the app to download and install on your device.
-
Open the app and create your account by entering your personal details, Aadhaar number, PAN number, etc. You will also need to set a four-digit PIN for your account.
-
-
How to Download Airtel Payment Bank Online Debit Card?
-
To download Airtel Payment Bank online debit card, you need to follow these steps:
-
-
Log in to your Airtel Payment Bank account using the app or the website.
-
Go to the online debit card section and click on "Generate Card". You will see your card details such as card number, expiry date, and CVV.
-
Click on "Download Card" and save the PDF file on your device. You can also print the card if you want.
-
You can use your online debit card for any online transactions that accept Visa cards. You can also link your card to any payment app such as Google Pay, PhonePe, Paytm, etc.
-
-
How to Use Airtel Payment Bank for Various Transactions?
-
Airtel Payment Bank offers you a variety of services and transactions that you can use with ease and convenience. Here are some of the common transactions that you can do with Airtel Payment Bank:
-
Recharges and Bill Payments
-
You can recharge your mobile, DTH, or broadband service using Airtel Payment Bank. You can also pay your electricity, water, gas, or postpaid bills using the app or the website. You can get cashback and discounts on some of these transactions. To recharge or pay bills, you need to:
-
-
Log in to your Airtel Payment Bank account and select the service that you want to recharge or pay.
-
Enter the amount and the details of the service provider.
-
Choose your payment method (wallet balance, online debit card, UPI, etc.) and confirm the transaction.
-
You will receive a confirmation message and a receipt on your phone and email.
-
-
Shopping and Online Payments
-
You can shop online from various websites and apps that accept Airtel Payment Bank as a payment option. You can also make online payments for various services such as food delivery, cab booking, movie tickets, etc. using Airtel Payment Bank. You can get cashback and discounts on some of these transactions. To shop or pay online, you need to:
-
-
Select Airtel Payment Bank as your payment option on the website or app that you are using.
-
Enter your mobile number and OTP to verify your identity.
-
Choose your payment method (wallet balance, online debit card, UPI, etc.) and confirm the transaction.
-
You will receive a confirmation message and a receipt on your phone and email.
-
-
Money Transfer and Cash Withdrawal
-
You can transfer money to any bank account or mobile number using Airtel Payment Bank. You can also withdraw cash from any of the over 5 lakh banking points across India using your mobile number and PIN. You can get cashback and discounts on some of these transactions. To transfer money or withdraw cash, you need to:
-
-
Log in to your Airtel Payment Bank account and select the option of money transfer or cash withdrawal.
-
Enter the amount and the details of the recipient (bank account number, IFSC code, mobile number, etc.) or the banking point (name, location, etc.).
-
Choose your payment method (wallet balance, online debit card, UPI, etc.) and confirm the transaction.
-
You will receive a confirmation message and a receipt on your phone and email.
-
-
Conclusion
-
Airtel Payment Bank is a great way to manage your money digitally and enjoy various benefits and features. It is easy to download Airtel Payment Bank app and online debit card, and use them for various transactions. You can also save money by getting cashback and discounts on some of these transactions. So what are you waiting for? Download Airtel Payment Bank today and experience the convenience of digital banking!
-
How to download airtel payment bank app
-Download airtel payment bank statement
-Download airtel payment bank apk
-Download airtel payment bank kyc form
-Download airtel payment bank online debit card
-Download airtel payment bank passbook
-Download airtel payment bank app for pc
-Download airtel payment bank app for android
-Download airtel payment bank app for ios
-Download airtel payment bank app latest version
-Benefits of downloading airtel payment bank app
-Steps to download airtel payment bank app
-Download airtel payment bank customer care number
-Download airtel payment bank logo
-Download airtel payment bank offer
-Download airtel payment bank referral code
-Download airtel payment bank account opening form
-Download airtel payment bank mini statement
-Download airtel payment bank cheque book
-Download airtel payment bank interest rate
-Download airtel payment bank ifsc code
-Download airtel payment bank atm card
-Download airtel payment bank upi pin
-Download airtel payment bank qr code
-Download airtel payment bank fastag
-Download airtel payment bank wallet
-Download airtel payment bank recharge plan
-Download airtel payment bank dth recharge
-Download airtel payment bank electricity bill pay
-Download airtel payment bank gas bill pay
-Download airtel payment bank water bill pay
-Download airtel payment bank broadband bill pay
-Download airtel payment bank insurance premium pay
-Download airtel payment bank loan repayment
-Download airtel payment bank money transfer
-Download airtel payment bank cash deposit
-Download airtel payment bank cash withdrawal
-Download airtel payment bank balance check
-Download airtel payment bank transaction history
-Download airtel payment bank rewards program
-Download airtel payment bank cashback offer
-Download airtel payment bank coupon code
-Download airtel payment bank promo code
-Download airtel payment bank review and rating
-Download airtel payment bank faq and help center
-Download airtel payment bank terms and conditions
-Download airtel payment bank privacy policy
-Download airtel payments banks careers and jobs
Download Airtel Payment Bank Branch Locator
-
FAQs
-
Q1: What are the charges for using Airtel Payment Bank?
-
A1: There are no charges for opening an account, maintaining a zero balance, or getting an online debit card with Airtel Payment Bank. However, there may be some charges for certain transactions such as money transfer, cash withdrawal, ATM usage, etc. depending on the amount and frequency of the transaction. You can check the latest charges on the website or app of Airtel Payment Bank.
-
Q2: How can I check my balance and transaction history?
-
A2: You can check your balance and transaction history by logging in to your Airtel Payment Bank account using the app or the website. You can also dial *400# from your registered mobile number and follow the instructions to check your balance.
-
Q3: How can I contact customer care for any queries or issues?
-
A3: You can contact customer care for any queries or issues by calling 400 from your registered mobile number or calling 8800688006 from any other number. You can also email your query or issue to wecare@airtelbank.com. Alternatively, you can visit the nearest banking point and get assistance from the staff.
-
Q4: Is Airtel Payment Bank safe and secure?
-
A4: Yes, Airtel Payment Bank is safe and secure. It uses advanced encryption and security protocols to protect your data and transactions. It also complies with the RBI guidelines and regulations for payment banks. You can also safeguard your account by keeping your PIN confidential and changing it regularly.
-
Q5: What are the eligibility criteria for opening an Airtel Payment Bank account?
-
A5: To open an Airtel Payment Bank account, you need to be an Indian citizen above 18 years of age. You also need to have a valid Aadhaar number and PAN number. You can open only one account per mobile number with Airtel Payment Bank.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Nubank Fake APK for Android 2023 Explore the Features of the Famous App.md b/spaces/1phancelerku/anime-remove-background/Download Nubank Fake APK for Android 2023 Explore the Features of the Famous App.md
deleted file mode 100644
index 3d8a9b9e993b75faa2873cb42334da385e4d5725..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Nubank Fake APK for Android 2023 Explore the Features of the Famous App.md
+++ /dev/null
@@ -1,138 +0,0 @@
-
-
Nubank Fake APK Download 2023: What You Need to Know
-
If you are looking for a way to manage your money with freedom, convenience, and security, you might have heard of Nubank, the largest digital bank in Latin America. But before you download the Nubank app, you need to be aware of the risks of fake apps that can harm your device and steal your information. In this article, we will explain what Nubank is, why it is popular, what a fake APK is, how to spot and avoid it, and how to download and install the genuine Nubank app safely.
-
What is Nubank and why is it popular?
-
Nubank is a digital bank that helps customers in Brazil, Mexico, and Colombia manage their money with freedom. Nubank is just one digital banking option in the Latin American world. It offers a variety of services for its customers, such as credit card applications, loans, life insurance, and business accounts. Nubank wants to improve the banking experience for customers across Latin America by using proprietary technology to create streamlined, uncomplicated, and completely digital banking options for its customers.
The benefits of using a Nubank personal or business account include:
-
-
No annuity or maintenance fees
-
Free and unlimited transfers to any bank
-
Real-time tracking of transactions
-
Easy contact with customer support
-
Transparent, simple, and fair experience
-
-
The Nubank app also allows users to stay on top of payments, save money, and track spending from anywhere. Users can block their credit card, apply for a limit raise, or access rewards programs from their mobile device.
Nubank currently only operates in Brazil, Colombia, and Mexico. To use the Nubank app, you need to have a compatible device with Android 4.4 or higher or iOS 10 or higher. You also need to have an internet connection to access the app's features. To open an account with Nubank, you need to provide some personal information, such as your name, email address, phone number, date of birth, and tax identification number.
-
What is a fake APK and why is it dangerous?
-
An APK (Android Package Kit) is a file format that contains all the elements needed to install an app on an Android device. A fake APK is an app that imitates a legitimate one but instead carries out malicious activities. These activities include monitoring your activity, installing malware, showing annoying ads, or stealing your personal information.
-
How fake apps work and what they can do
-
Fake apps can be distributed in various ways. They can be hosted on third-party app stores or fake app stores. Cybercriminals can even use official app stores to distribute fake apps, despite the security measures in place. A cybercriminal can register themselves as a developer on any app store, download a legitimate app, and rewrite it using malicious code. Then, they can upload their fake app to the app store.
-
Once you download a fake app on your device, it can perform various actions without your consent or knowledge. For example, it can:
-
-
Send premium SMS messages or make calls to charge you money
-
Access your contacts, photos, messages, or other data
-
Download more malware or adware on your device
-
Redirect you to phishing websites or fake login pages
-
Use your device as part of a botnet to launch cyberattacks
-
-
These actions can compromise your device's performance, security, and privacy. You can lose money, data, or even your identity if you fall victim to a fake app.
-
How to spot and avoid fake apps
-
To protect yourself from fake apps, you need to be vigilant and careful when downloading apps. Here are some tips to help you spot and avoid fake apps (a small checksum sketch follows this list):
-
-
Check the app's name, developer, description, and reviews. Look for spelling errors, grammar mistakes, low ratings, or negative feedback.
-
Compare the app with the official website of the service or company. Look for inconsistencies or discrepancies in the logo, design, or features.
-
Avoid downloading apps from third-party app stores or unknown sources. Use only trusted and verified app stores, such as Google Play Store or Apple App Store.
-
Check the app's permissions and settings. Avoid apps that ask for unnecessary or excessive permissions, such as access to your camera, microphone, location, or contacts.
-
Use a reputable antivirus or security app on your device. Scan your device regularly and remove any suspicious or unwanted apps.
-
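As a minimal, illustrative check, and not something Nubank itself provides, the sketch below computes the SHA-256 digest of a downloaded APK so you can compare it with a checksum published by the source you downloaded from. The file name and expected digest are placeholders.

```python
# Sketch only: compare a downloaded APK's SHA-256 digest against a published checksum.
# The file name and expected digest are placeholders.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so large APKs do not need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0123abcd..."  # placeholder: the checksum the download page publishes
actual = sha256_of("nubank.apk")  # placeholder file name
print("Checksum OK" if actual == expected else f"Mismatch: got {actual}")
```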
-
How to download and install the genuine Nubank app safely
-
If you want to enjoy the benefits of Nubank without risking your device or data, you need to download and install the genuine Nubank app safely. Here are the steps to do so:
-
How to find and verify the official Nubank app
-
The official Nubank app is available on Google Play Store for Android devices and Apple App Store for iOS devices. To find and verify the official Nubank app, you can:
-
-
Search for "Nubank" on the app store. Make sure the app's name is spelled correctly and matches the logo of Nubank.
-
Check the app's developer name. The official Nubank app is developed by "Nu Pagamentos S.A." for Android devices and "Nubank" for iOS devices.
-
Check the app's rating, reviews, and downloads. The official Nubank app has a high rating (4.5 stars or above), positive reviews, and millions of downloads.
-
Check the app's description and screenshots. The official Nubank app has a clear and detailed description of its features and benefits, as well as screenshots that show its interface and functionality.
-
-
How to install and set up the Nubank app on your device
-
Once you have found and verified the official Nubank app, you can install it on your device by following these steps:
-
-
Tap on the "Install" button on the app store. Wait for the app to download and install on your device.
-
Open the app and tap on "Create account". Enter your personal information, such as your name, email address, phone number, date of birth, and tax identification number.
-
Verify your identity by taking a selfie and uploading a photo of your ID document.
-
Wait for Nubank to approve your account. This may take a few minutes or hours depending on their verification process.
-
Once your account is approved, you can access the app's features and services. You can also request a physical credit card that will be delivered to your address.
-
-
Conclusion
-
Nubank is a digital bank that offers a convenient, secure, and transparent way to manage your money with freedom. However, you need to be careful of fake apps that can imitate Nubank and harm your device or data. To avoid fake apps, you need to check the app's name, developer, description, reviews, permissions, and settings before downloading it. You also need to use only trusted and verified app stores to download apps. To download and install the genuine Nubank app safely, you need to find and verify the official Nubank app on Google Play Store or Apple App Store, then follow the steps to install and set up the app on your device.
-
We hope this article has helped you understand what Nubank is, why it is popular, what a fake APK is, how to spot and avoid it, and how to download and install the genuine Nubank app safely. If you have any questions or feedback, please feel free to contact us. We would love to hear from you!
-
Disclaimer: This article is for informational purposes only and does not constitute financial or legal advice. Please consult a professional before making any decisions regarding your money or data.
-
FAQs
-
Here are some frequently asked questions about Nubank and fake apps:
-
-
Is Nubank safe and reliable?
-
Yes, Nubank is safe and reliable. Nubank is regulated by the Central Bank of Brazil, the National Monetary Council, and the Securities and Exchange Commission of Brazil. Nubank also uses advanced encryption and security protocols to protect your data and transactions. Nubank has over 40 million customers and has won several awards for its innovation and customer satisfaction.
-
How can I contact Nubank customer support?
-
You can contact Nubank customer support through the app, phone, email, or chat. You can also visit their website or social media pages for more information. Nubank customer support is available 24/7 and speaks Portuguese, Spanish, and English.
-
What are the advantages of using a digital bank over a traditional bank?
-
Some of the advantages of using a digital bank over a traditional bank are:
-
-
You can access your account and services anytime, anywhere, from your mobile device.
-
You can save money on fees, commissions, and interest rates.
-
You can enjoy more flexibility, convenience, and transparency in your banking experience.
-
You can benefit from innovative features, such as rewards programs, cashback, or personal finance tools.
-
-
How can I update the Nubank app?
-
You can update the Nubank app by following these steps:
-
-
Open the app store on your device.
-
Search for "Nubank" and tap on the app.
-
If there is an update available, tap on the "Update" button.
-
Wait for the app to download and install the update.
-
Open the app and enjoy the new features and improvements.
-
-
How can I uninstall the Nubank app?
-
You can uninstall the Nubank app by following these steps:
-
-
Open the settings on your device.
-
Tap on "Apps" or "Applications".
-
Find and tap on "Nubank".
-
Tap on "Uninstall" or "Delete".
-
Confirm your action and wait for the app to be removed from your device.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download Onmyoji Arena APK for Android - Play Offline Strategy Game.md b/spaces/1phancelerku/anime-remove-background/Download Onmyoji Arena APK for Android - Play Offline Strategy Game.md
deleted file mode 100644
index 2eb756dab507a4c5003f5b19114be770ba232c88..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download Onmyoji Arena APK for Android - Play Offline Strategy Game.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
Onmyoji Arena APK Offline: How to Play the Game Without Internet Connection
-
Do you love playing Onmyoji Arena, the popular mobile MOBA game based on Japanese folklore and mythology? Do you wish you could play it anytime and anywhere, even without an internet connection? If so, you are in luck. In this article, we will show you how to download and install Onmyoji Arena APK offline, and how to play the game without internet connection. Read on to find out more.
Onmyoji Arena is a mobile game developed by NetEase Games, based on the hit RPG game Onmyoji. It is a 5v5 MOBA game that features stunning 3D graphics, elegant Japanese aesthetics, and a stellar voice cast. The game has over 70 characters, called shikigami, that you can choose from, each with their own unique skills and abilities. You can team up with your friends or other players online, and compete in various modes, such as ranked matches, casual matches, or special events. You can also customize your shikigami with different skins, accessories, and emotes.
-
Why would you want to play Onmyoji Arena offline?
-
The benefits of playing the game without internet connection
-
Playing Onmyoji Arena offline has some advantages over playing it online. For instance:
-
-
You can play the game anytime and anywhere, without worrying about your data usage or wifi availability.
-
You can avoid lag, disconnects, or other network issues that might affect your gameplay or performance.
-
You can practice your skills and strategies with different shikigami, without affecting your rank or reputation.
-
You can enjoy the game at your own pace, without pressure or competition from other players.
-
-
The drawbacks of playing the game without internet connection
-
However, playing Onmyoji Arena offline also has some disadvantages over playing it online. For example:
-
onmyoji arena mod apk offline
-onmyoji arena apk download offline
-onmyoji arena offline mode apk
-onmyoji arena latest version offline apk
-onmyoji arena apk obb offline
-onmyoji arena hack apk offline
-onmyoji arena apk data offline
-onmyoji arena apk pure offline
-onmyoji arena apk revdl offline
-onmyoji arena apk rexdl offline
-onmyoji arena apk mirror offline
-onmyoji arena apk update offline
-onmyoji arena apk android offline
-onmyoji arena apk ios offline
-onmyoji arena apk pc offline
-onmyoji arena apk no internet offline
-onmyoji arena apk free download offline
-onmyoji arena apk full version offline
-onmyoji arena apk unlimited money offline
-onmyoji arena apk cheat offline
-onmyoji arena apk english offline
-onmyoji arena apk chinese offline
-onmyoji arena apk global offline
-onmyoji arena apk japan offline
-onmyoji arena apk korea offline
-onmyoji arena apk vietnam offline
-onmyoji arena apk indonesia offline
-onmyoji arena apk malaysia offline
-onmyoji arena apk philippines offline
-onmyoji arena apk thailand offline
-onmyoji arena 3v3v3 battle royale mode apk offline
-onmyoji arena 5v5 moba game apk offline
-onmyoji arena fair and balanced gameplay apk offline
-onmyoji arena elegant japanese aesthetics and voice cast apk offline
-onmyoji arena gorgeous 3d models and graphics apk offline
-onmyoji arena innovative map and gameplay features apk offline
-onmyoji arena original characters and shikigami from the Onmyoji world apk offline
-onmyoji arena new skins and events every week apk offline
-onmyoji arena cross-platform play with pc and mobile players apk offline
-onmyoji arena easy to learn and play with intuitive controls and tutorials apk offline
-how to install Onmyoji Arena APK Offline
-how to play Onmyoji Arena APK Offline
-how to update Onmyoji Arena APK Offline
-how to hack Onmyoji Arena APK Offline
-how to fix Onmyoji Arena APK Offline errors
-how to uninstall Onmyoji Arena APK Offline
-how to backup Onmyoji Arena APK Offline data
-how to transfer Onmyoji Arena APK Offline account
-how to redeem Onmyo
-
-
You will not be able to access some features or modes that require internet connection, such as ranked matches, casual matches, or special events.
-
You will not be able to update your game or download new content that might be released by the developers.
-
You will not be able to interact with other players or join a guild.
-
You will not be able to earn rewards or achievements that are based on online activities.
-
-
How to download and install Onmyoji Arena APK offline?
-
The steps to get the game on your Android device
-
If you want to play Onmyoji Arena offline, you will need to download and install the APK file of the game on your Android device. Here are the steps to do so:
-
-
Go to a trusted website that offers Onmyoji Arena APK offline download, such as APKCombo or the Google Play Store.
-
Choose the latest version of the game and click on the download button.
-
Wait for the download to finish and locate the APK file on your device.
-
Tap on the APK file and follow the instructions to install the game on your device.
-
Launch the game and enjoy playing it offline.
-
- The precautions to take before installing the game
-
Before you install Onmyoji Arena APK offline on your device, you should take some precautions to ensure your safety and security. Here are some tips to follow:
-
-
Make sure you have enough storage space on your device to install the game.
-
Make sure you have a backup of your data and settings in case something goes wrong during the installation.
-
Make sure you download the APK file from a reliable and reputable source, and scan it for viruses or malware before installing it (see the sketch after this list).
-
Make sure you enable the option to install apps from unknown sources on your device settings, and disable it after the installation is done.
-
Make sure you agree to the terms and conditions of the game before installing it.
-
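One additional precaution, sketched below as a rough illustration: an APK is a ZIP archive, so before sideloading you can at least confirm that the archive opens cleanly and contains AndroidManifest.xml. This does not replace an antivirus scan, and the file name is a placeholder.

```python
# Sketch only: a structural sanity check on a downloaded APK before sideloading.
# An APK is a ZIP archive that must contain AndroidManifest.xml.
import zipfile

def looks_like_valid_apk(path: str) -> bool:
    try:
        with zipfile.ZipFile(path) as apk:
            if apk.testzip() is not None:  # returns the first corrupt member, if any
                return False
            return "AndroidManifest.xml" in apk.namelist()
    except (zipfile.BadZipFile, OSError):
        return False

print(looks_like_valid_apk("onmyoji_arena_offline.apk"))  # placeholder file name
```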
-
How to play Onmyoji Arena offline?
-
The modes and options available in the offline mode
-
Once you have installed Onmyoji Arena APK offline on your device, you can play the game without internet connection. However, you will only be able to access some modes and options in the offline mode. Here are some of them:
-
-
You can play the tutorial mode, where you can learn the basics of the game and practice with different shikigami.
-
You can play the practice mode, where you can choose any shikigami and any map, and play against AI opponents or bots.
-
You can play the custom mode, where you can create your own match settings, such as the number of players, the difficulty level, and the map.
-
You can play the story mode, where you can follow the plot of the game and unlock new shikigami and skins.
-
You can access the shikigami gallery, where you can view the details and stats of each shikigami, as well as their skins, accessories, and emotes.
-
-
The tips and tricks to enjoy the game offline
-
Playing Onmyoji Arena offline can be fun and rewarding, if you know how to make the most of it. Here are some tips and tricks to enjoy the game offline:
-
-
Try different shikigami and find out which ones suit your playstyle and preferences.
-
Experiment with different builds and items for each shikigami, and see how they affect their performance.
-
Learn the strengths and weaknesses of each shikigami, and how to counter them effectively.
-
Master the mechanics and strategies of each map, such as the objectives, the lanes, the jungle, and the bosses.
-
Challenge yourself by increasing the difficulty level or changing the match settings in the custom mode.
-
-
Conclusion
-
Onmyoji Arena is a great game that you can play online or offline. If you want to play it offline, you will need to download and install Onmyoji Arena APK offline on your Android device. You will be able to access some modes and options in the offline mode, such as tutorial, practice, custom, and story. You will also be able to enjoy the game offline by trying different shikigami, builds, items, maps, and settings. However, you will not be able to access some features or modes that require internet connection, such as ranked matches, casual matches, or special events. You will also not be able to update your game or download new content that might be released by the developers. You will also not be able to interact with other players or join a guild. You will also not be able to earn rewards or achievements that are based on online activities. Therefore, playing Onmyoji Arena offline has its pros and cons, and you should decide which mode suits you better. If you are interested in playing Onmyoji Arena offline, you can follow the steps and tips we have provided in this article. We hope you have fun playing Onmyoji Arena offline!
-
FAQs
-
Q: Is Onmyoji Arena APK offline safe to download and install?
-
A: Yes, as long as you download it from a trusted website that offers Onmyoji Arena APK offline download, such as APKCombo or the Google Play Store. You should also scan it for viruses or malware before installing it on your device.
-
Q: Can I play Onmyoji Arena offline on iOS devices?
-
A: No, Onmyoji Arena APK offline is only compatible with Android devices. If you want to play Onmyoji Arena on iOS devices, you will need an internet connection.
-
Q: Can I switch between online and offline mode in Onmyoji Arena?
-
A: Yes, you can switch between online and offline mode in Onmyoji Arena, as long as you have an internet connection. You can do so by tapping on the settings icon on the top right corner of the main screen, and then choosing the online or offline option. However, you should note that some of your data or progress might not be synced or saved when you switch modes.
-
Q: What are the best shikigami to play offline in Onmyoji Arena?
-
A: The best shikigami to play offline in Onmyoji Arena depends on your personal preference and playstyle. However, some of the shikigami that are generally considered to be good for offline mode are:
-
-
Yoto Hime: A powerful samurai who can deal massive damage and execute enemies with her ultimate skill.
-
Yamakaze: A swift ninja who can dash and blink around the map, and assassinate enemies with his stealth and burst.
-
Shuten Doji: A tanky ogre who can absorb damage and heal himself, and stun enemies with his drunken rage.
-
Hana: A graceful healer who can support her allies and herself with her healing and shielding skills.
-
Ootengu: A versatile mage who can cast spells from a distance, and unleash a devastating storm with his ultimate skill.
-
-
Q: How can I get more skins, accessories, and emotes for my shikigami in offline mode?
-
A: Unfortunately, you cannot get more skins, accessories, or emotes for your shikigami in offline mode. You will need to play online mode to earn rewards or purchase items that can unlock more customization options for your shikigami.
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/20four60/Auto-GPT/README.md b/spaces/20four60/Auto-GPT/README.md
deleted file mode 100644
index 9f8b3e2d2ea8a43988518fdc8e56935675f89e30..0000000000000000000000000000000000000000
--- a/spaces/20four60/Auto-GPT/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Zenml Server
-emoji: 🧘
-colorFrom: purple
-colorTo: green
-sdk: docker
-pinned: false
-app_port: 8080
-license: wtfpl
-duplicated_from: zenml/zenml
----
diff --git a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/verification.py b/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/verification.py
deleted file mode 100644
index 253343b83dbf9d1bd154d14ec068e098bf0968db..0000000000000000000000000000000000000000
--- a/spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/eval/verification.py
+++ /dev/null
@@ -1,407 +0,0 @@
-"""Helper for evaluation on the Labeled Faces in the Wild dataset
-"""
-
-# MIT License
-#
-# Copyright (c) 2016 David Sandberg
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-
-import datetime
-import os
-import pickle
-
-import mxnet as mx
-import numpy as np
-import sklearn.preprocessing
-import torch
-from mxnet import ndarray as nd
-from scipy import interpolate
-from sklearn.decomposition import PCA
-from sklearn.model_selection import KFold
-
-
-class LFold:
- def __init__(self, n_splits=2, shuffle=False):
- self.n_splits = n_splits
- if self.n_splits > 1:
- self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)
-
- def split(self, indices):
- if self.n_splits > 1:
- return self.k_fold.split(indices)
- else:
- return [(indices, indices)]
-
-
-def calculate_roc(thresholds,
- embeddings1,
- embeddings2,
- actual_issame,
- nrof_folds=10,
- pca=0):
- assert (embeddings1.shape[0] == embeddings2.shape[0])
- assert (embeddings1.shape[1] == embeddings2.shape[1])
- nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
- nrof_thresholds = len(thresholds)
- k_fold = LFold(n_splits=nrof_folds, shuffle=False)
-
- tprs = np.zeros((nrof_folds, nrof_thresholds))
- fprs = np.zeros((nrof_folds, nrof_thresholds))
- accuracy = np.zeros((nrof_folds))
- indices = np.arange(nrof_pairs)
-
- if pca == 0:
- diff = np.subtract(embeddings1, embeddings2)
- dist = np.sum(np.square(diff), 1)
-
- for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
- if pca > 0:
- print('doing pca on', fold_idx)
- embed1_train = embeddings1[train_set]
- embed2_train = embeddings2[train_set]
- _embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
- pca_model = PCA(n_components=pca)
- pca_model.fit(_embed_train)
- embed1 = pca_model.transform(embeddings1)
- embed2 = pca_model.transform(embeddings2)
- embed1 = sklearn.preprocessing.normalize(embed1)
- embed2 = sklearn.preprocessing.normalize(embed2)
- diff = np.subtract(embed1, embed2)
- dist = np.sum(np.square(diff), 1)
-
- # Find the best threshold for the fold
- acc_train = np.zeros((nrof_thresholds))
- for threshold_idx, threshold in enumerate(thresholds):
- _, _, acc_train[threshold_idx] = calculate_accuracy(
- threshold, dist[train_set], actual_issame[train_set])
- best_threshold_index = np.argmax(acc_train)
- for threshold_idx, threshold in enumerate(thresholds):
- tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
- threshold, dist[test_set],
- actual_issame[test_set])
- _, _, accuracy[fold_idx] = calculate_accuracy(
- thresholds[best_threshold_index], dist[test_set],
- actual_issame[test_set])
-
- tpr = np.mean(tprs, 0)
- fpr = np.mean(fprs, 0)
- return tpr, fpr, accuracy
-
-
-def calculate_accuracy(threshold, dist, actual_issame):
- predict_issame = np.less(dist, threshold)
- tp = np.sum(np.logical_and(predict_issame, actual_issame))
- fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
- tn = np.sum(
- np.logical_and(np.logical_not(predict_issame),
- np.logical_not(actual_issame)))
- fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
-
- tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
- fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
- acc = float(tp + tn) / dist.size
- return tpr, fpr, acc
-
-
-def calculate_val(thresholds,
- embeddings1,
- embeddings2,
- actual_issame,
- far_target,
- nrof_folds=10):
- assert (embeddings1.shape[0] == embeddings2.shape[0])
- assert (embeddings1.shape[1] == embeddings2.shape[1])
- nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
- nrof_thresholds = len(thresholds)
- k_fold = LFold(n_splits=nrof_folds, shuffle=False)
-
- val = np.zeros(nrof_folds)
- far = np.zeros(nrof_folds)
-
- diff = np.subtract(embeddings1, embeddings2)
- dist = np.sum(np.square(diff), 1)
- indices = np.arange(nrof_pairs)
-
- for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
-
- # Find the threshold that gives FAR = far_target
- far_train = np.zeros(nrof_thresholds)
- for threshold_idx, threshold in enumerate(thresholds):
- _, far_train[threshold_idx] = calculate_val_far(
- threshold, dist[train_set], actual_issame[train_set])
- if np.max(far_train) >= far_target:
- f = interpolate.interp1d(far_train, thresholds, kind='slinear')
- threshold = f(far_target)
- else:
- threshold = 0.0
-
- val[fold_idx], far[fold_idx] = calculate_val_far(
- threshold, dist[test_set], actual_issame[test_set])
-
- val_mean = np.mean(val)
- far_mean = np.mean(far)
- val_std = np.std(val)
- return val_mean, val_std, far_mean
-
-
-def calculate_val_far(threshold, dist, actual_issame):
- predict_issame = np.less(dist, threshold)
- true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
- false_accept = np.sum(
- np.logical_and(predict_issame, np.logical_not(actual_issame)))
- n_same = np.sum(actual_issame)
- n_diff = np.sum(np.logical_not(actual_issame))
- # print(true_accept, false_accept)
- # print(n_same, n_diff)
- val = float(true_accept) / float(n_same)
- far = float(false_accept) / float(n_diff)
- return val, far
-
-
-def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
- # Calculate evaluation metrics
- thresholds = np.arange(0, 4, 0.01)
- embeddings1 = embeddings[0::2]
- embeddings2 = embeddings[1::2]
- tpr, fpr, accuracy = calculate_roc(thresholds,
- embeddings1,
- embeddings2,
- np.asarray(actual_issame),
- nrof_folds=nrof_folds,
- pca=pca)
- thresholds = np.arange(0, 4, 0.001)
- val, val_std, far = calculate_val(thresholds,
- embeddings1,
- embeddings2,
- np.asarray(actual_issame),
- 1e-3,
- nrof_folds=nrof_folds)
- return tpr, fpr, accuracy, val, val_std, far
-
-@torch.no_grad()
-def load_bin(path, image_size):
- try:
- with open(path, 'rb') as f:
- bins, issame_list = pickle.load(f) # py2
- except UnicodeDecodeError as e:
- with open(path, 'rb') as f:
- bins, issame_list = pickle.load(f, encoding='bytes') # py3
- data_list = []
- for flip in [0, 1]:
- data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
- data_list.append(data)
- for idx in range(len(issame_list) * 2):
- _bin = bins[idx]
- img = mx.image.imdecode(_bin)
- if img.shape[1] != image_size[0]:
- img = mx.image.resize_short(img, image_size[0])
- img = nd.transpose(img, axes=(2, 0, 1))
- for flip in [0, 1]:
- if flip == 1:
- img = mx.ndarray.flip(data=img, axis=2)
- data_list[flip][idx][:] = torch.from_numpy(img.asnumpy())
- if idx % 1000 == 0:
- print('loading bin', idx)
- print(data_list[0].shape)
- return data_list, issame_list
-
-@torch.no_grad()
-def test(data_set, backbone, batch_size, nfolds=10):
- print('testing verification..')
- data_list = data_set[0]
- issame_list = data_set[1]
- embeddings_list = []
- time_consumed = 0.0
- for i in range(len(data_list)):
- data = data_list[i]
- embeddings = None
- ba = 0
- while ba < data.shape[0]:
- bb = min(ba + batch_size, data.shape[0])
- count = bb - ba
- _data = data[bb - batch_size: bb]
- time0 = datetime.datetime.now()
- img = ((_data / 255) - 0.5) / 0.5
- net_out: torch.Tensor = backbone(img)
- _embeddings = net_out.detach().cpu().numpy()
- time_now = datetime.datetime.now()
- diff = time_now - time0
- time_consumed += diff.total_seconds()
- if embeddings is None:
- embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
- embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
- ba = bb
- embeddings_list.append(embeddings)
-
- _xnorm = 0.0
- _xnorm_cnt = 0
- for embed in embeddings_list:
- for i in range(embed.shape[0]):
- _em = embed[i]
- _norm = np.linalg.norm(_em)
- _xnorm += _norm
- _xnorm_cnt += 1
- _xnorm /= _xnorm_cnt
-
- acc1 = 0.0
- std1 = 0.0
- embeddings = embeddings_list[0] + embeddings_list[1]
- embeddings = sklearn.preprocessing.normalize(embeddings)
- print(embeddings.shape)
- print('infer time', time_consumed)
- _, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds)
- acc2, std2 = np.mean(accuracy), np.std(accuracy)
- return acc1, std1, acc2, std2, _xnorm, embeddings_list
-
-
-@torch.no_grad()
-def dumpR(data_set,
- backbone,
- batch_size,
- name='',
- data_extra=None,
- label_shape=None):
- print('dump verification embedding..')
- data_list = data_set[0]
- issame_list = data_set[1]
- embeddings_list = []
- time_consumed = 0.0
- for i in range(len(data_list)):
- data = data_list[i]
- embeddings = None
- ba = 0
- while ba < data.shape[0]:
- bb = min(ba + batch_size, data.shape[0])
- count = bb - ba
-
-            _data = data[bb - batch_size: bb]
-            time0 = datetime.datetime.now()
-            # Normalize to [-1, 1] and embed with the torch backbone, mirroring test().
-            img = ((_data / 255) - 0.5) / 0.5
-            net_out: torch.Tensor = backbone(img)
-            _embeddings = net_out.detach().cpu().numpy()
- time_now = datetime.datetime.now()
- diff = time_now - time0
- time_consumed += diff.total_seconds()
- if embeddings is None:
- embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
- embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
- ba = bb
- embeddings_list.append(embeddings)
- embeddings = embeddings_list[0] + embeddings_list[1]
- embeddings = sklearn.preprocessing.normalize(embeddings)
- actual_issame = np.asarray(issame_list)
- outname = os.path.join('temp.bin')
- with open(outname, 'wb') as f:
- pickle.dump((embeddings, issame_list),
- f,
- protocol=pickle.HIGHEST_PROTOCOL)
-
-
-# if __name__ == '__main__':
-#
-# parser = argparse.ArgumentParser(description='do verification')
-# # general
-# parser.add_argument('--data-dir', default='', help='')
-# parser.add_argument('--model',
-# default='../model/softmax,50',
-# help='path to load model.')
-# parser.add_argument('--target',
-# default='lfw,cfp_ff,cfp_fp,agedb_30',
-# help='test targets.')
-# parser.add_argument('--gpu', default=0, type=int, help='gpu id')
-# parser.add_argument('--batch-size', default=32, type=int, help='')
-# parser.add_argument('--max', default='', type=str, help='')
-# parser.add_argument('--mode', default=0, type=int, help='')
-# parser.add_argument('--nfolds', default=10, type=int, help='')
-# args = parser.parse_args()
-# image_size = [112, 112]
-# print('image_size', image_size)
-# ctx = mx.gpu(args.gpu)
-# nets = []
-# vec = args.model.split(',')
-# prefix = args.model.split(',')[0]
-# epochs = []
-# if len(vec) == 1:
-# pdir = os.path.dirname(prefix)
-# for fname in os.listdir(pdir):
-# if not fname.endswith('.params'):
-# continue
-# _file = os.path.join(pdir, fname)
-# if _file.startswith(prefix):
-# epoch = int(fname.split('.')[0].split('-')[1])
-# epochs.append(epoch)
-# epochs = sorted(epochs, reverse=True)
-# if len(args.max) > 0:
-# _max = [int(x) for x in args.max.split(',')]
-# assert len(_max) == 2
-# if len(epochs) > _max[1]:
-# epochs = epochs[_max[0]:_max[1]]
-#
-# else:
-# epochs = [int(x) for x in vec[1].split('|')]
-# print('model number', len(epochs))
-# time0 = datetime.datetime.now()
-# for epoch in epochs:
-# print('loading', prefix, epoch)
-# sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
-# # arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
-# all_layers = sym.get_internals()
-# sym = all_layers['fc1_output']
-# model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
-# # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
-# model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0],
-# image_size[1]))])
-# model.set_params(arg_params, aux_params)
-# nets.append(model)
-# time_now = datetime.datetime.now()
-# diff = time_now - time0
-# print('model loading time', diff.total_seconds())
-#
-# ver_list = []
-# ver_name_list = []
-# for name in args.target.split(','):
-# path = os.path.join(args.data_dir, name + ".bin")
-# if os.path.exists(path):
-# print('loading.. ', name)
-# data_set = load_bin(path, image_size)
-# ver_list.append(data_set)
-# ver_name_list.append(name)
-#
-# if args.mode == 0:
-# for i in range(len(ver_list)):
-# results = []
-# for model in nets:
-# acc1, std1, acc2, std2, xnorm, embeddings_list = test(
-# ver_list[i], model, args.batch_size, args.nfolds)
-# print('[%s]XNorm: %f' % (ver_name_list[i], xnorm))
-# print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1))
-# print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2))
-# results.append(acc2)
-# print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results)))
-# elif args.mode == 1:
-# raise ValueError
-# else:
-# model = nets[0]
-# dumpR(ver_list[0], model, args.batch_size, args.target)
diff --git a/spaces/AI-Hobbyist/Hoyo-RVC/Changelog_KO.md b/spaces/AI-Hobbyist/Hoyo-RVC/Changelog_KO.md
deleted file mode 100644
index 37e0891a5c5d22288b525f8bdab4d9c041601122..0000000000000000000000000000000000000000
--- a/spaces/AI-Hobbyist/Hoyo-RVC/Changelog_KO.md
+++ /dev/null
@@ -1,91 +0,0 @@
-### Update of June 18, 2023
-
-- Added new 32k and 48k pretrained models for the v2 version.
-- Fixed inference errors of non-f0 models.
-- For training sets longer than 1 hour, use minibatch-kmeans during index building to speed up training.
-- Provided vocal2guitar on [huggingface](https://huggingface.co/spaces/lj1995/vocal2guitar).
-- Automatically remove outliers during data processing.
-- Added an ONNX export option tab.
-
-Things that were tried but not included in this update:
-
-- Feature retrieval with an added temporal dimension: no meaningful effect.
-- Feature retrieval with PCA dimensionality reduction: no meaningful effect.
-- Supporting ONNX inference failed, because nsf generation requires PyTorch.
-- Randomly augmenting the pitch, gender, equalizer, and noise of the input during training: no meaningful effect.
-
-Planned for future updates:
-
-- Integration of Vocos-RVC (a compact vocoder).
-- Crepe support for pitch recognition during training.
-- Crepe precision synchronized with the REC-config.
-- F0 editor support.
-
-### Update of May 28, 2023
-
-- Added the v2 jupyter notebook and the Korean changelog, and fixed some dependency modules.
-- Added a protection mode for voiceless consonants and breath sounds.
-- Added support for crepe-full pitch detection.
-- UVR5 vocal separation: support for de-reverb and de-echo models.
-- Added the experiment name and version to the index name.
-- Let users manually choose the export format of the output audio for batch voice conversion and UVR5 vocal separation.
-- Dropped support for 32k training models.
-
-### Update of May 13, 2023
-
-- Removed redundant code (infer_pack and uvr5_pack) from the old runtime of the one-click package.
-- Fixed the pseudo-multiprocessing bug in training-set preprocessing.
-- Added median-filter radius adjustment for the Harvest pitch-recognition algorithm.
-- Support post-processing resampling when exporting audio.
-- The multiprocessing "n_cpu" setting for training now applies to "data preprocessing and f0 extraction" instead of "f0 extraction" only.
-- Automatically detect index paths under the logs folder and provide them in a drop-down list.
-- Added a "Frequently Asked Questions" tab (see the github RVC wiki).
-- Cache the Harvest pitch during inference when the same input audio path is used.
-  (Note: with Harvest pitch extraction, the whole pipeline goes through a long, repetitive pitch-extraction process. Without caching, changing the timbre, index, or pitch median-filter radius after the first inference leads to extremely long waits!)
-
-### Update of May 14, 2023
-
-- Use the input's volume envelope to mix with or replace the output's volume envelope. (This minimizes problems when the input is silent or the output is noisy. It is not recommended when the input audio has loud background noise. The option is disabled by default (1: disabled).)
-- Support saving extracted small models at a specified frequency. (Very useful if you want to inspect performance at different epochs but do not want to keep every large checkpoint and manually extract a small model via ckpt processing each time.)
-- Fixed the "connection error" problem caused by a server-wide global proxy by setting an environment variable.
-- Support pretrained v2 models. (Currently only the 40k version is publicly available for testing; the other two sampling rates are not yet fully trained and are on hold.)
-- Clamp excessive volume exceeding 1 before inference.
-- Fine-tuned the data preprocessing parameters.
-
-### Update of April 9, 2023
-
-- Modified the training parameters to improve GPU utilization: A100 from 25% to about 90%, V100 from 50% to about 90%, 2060S from 60% to about 85%, P40 from 25% to about 95%.
-  Training speed is greatly improved.
-- Changed the parameter convention: total batch_size now means batch_size per GPU.
-- Changed total_epoch: the maximum limit increased from 100 to 1000; the default increased from 10 to 20.
-- Fixed an issue where ckpt extraction mis-detected the pitch setting, causing abnormal inference.
-- Fixed an issue where distributed training saved a ckpt for every rank.
-- Applied NaN feature filtering during feature extraction.
-- Fixed an issue where silent input/output randomly produced noise. (Older models need to be retrained on a new dataset.)
-
-### Update of April 16, 2023
-
-- Added a local real-time voice changing mini-GUI; start it by double-clicking go-realtime-gui.bat.
-- Applied filtering to frequency bands below 50 Hz during training and inference.
-- Lowered the minimum pitch of pyworld pitch extraction from the default 80 to 50 for training and inference, so male low voices between 50-80 Hz are no longer muted.
-- The WebUI language now follows the system locale. (Currently supported: en_US, ja_JP, zh_CN, zh_HK, zh_SG, zh_TW; unsupported locales default to en_US.)
-- Fixed recognition of some GPUs (e.g., V100-16G recognition failure, P4 recognition failure).
-
-### Update of April 28, 2023
-
-- Upgraded the Faiss index settings for faster speed and higher quality.
-- Removed the dependency on total_npy; sharing a model no longer requires the total_npy input.
-- Lifted the restrictions on 16-series GPUs and provided 4GB inference settings for GPUs with 4GB of VRAM.
-- Fixed a bug in UVR5 vocal/accompaniment separation for some audio formats.
-- The real-time voice changing mini-GUI now supports non-40k and non-lazy-pitch models.
-
-### Future plans
-
-Features:
-
-- Support a multi-user training tab (up to 4 users).
-
-Base model:
-
-- Add breathing wav files to the training dataset to fix the issue of vocal breathing being converted into noise.
-- Work on a base model trained with a singing-voice training set is in progress; it will be released in the future.
diff --git a/spaces/AIConsultant/MusicGen/audiocraft/grids/__init__.py b/spaces/AIConsultant/MusicGen/audiocraft/grids/__init__.py
deleted file mode 100644
index 70643517cd1a8b4e712eca90e23411ae89937795..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/audiocraft/grids/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-"""Dora Grids."""
diff --git a/spaces/AIConsultant/MusicGen/docs/AUDIOGEN.md b/spaces/AIConsultant/MusicGen/docs/AUDIOGEN.md
deleted file mode 100644
index a0ff481190fb52fe865aa66aaaa10176f7cf995c..0000000000000000000000000000000000000000
--- a/spaces/AIConsultant/MusicGen/docs/AUDIOGEN.md
+++ /dev/null
@@ -1,158 +0,0 @@
-# AudioGen: Textually-guided audio generation
-
-AudioCraft provides the code and a model re-implementing AudioGen, a [textually-guided audio generation][audiogen_arxiv]
-model that performs text-to-sound generation.
-
-The provided AudioGen reimplementation follows the LM model architecture introduced in [MusicGen][musicgen_arxiv]
-and is a single stage auto-regressive Transformer model trained over a 16kHz
-EnCodec tokenizer with 4 codebooks sampled at 50 Hz.
-This model variant reaches audio quality similar to the original implementation introduced in the AudioGen publication,
-while providing faster generation given the smaller frame rate.
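-
-To make the frame-rate tradeoff concrete, here is a small back-of-the-envelope sketch (plain Python, independent of the AudioCraft code) of how many tokens the LM must produce per second of audio under the setup described above:
-
-```python
-sample_rate = 16_000   # 16 kHz EnCodec tokenizer
-frame_rate = 50        # token frames per second, per codebook
-codebooks = 4
-
-samples_per_frame = sample_rate // frame_rate   # 320 waveform samples per token frame
-tokens_per_second = frame_rate * codebooks      # 200 discrete tokens per second of audio
-print(samples_per_frame, tokens_per_second)
-print(5 * tokens_per_second)                    # 1000 tokens for a 5-second clip
-```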
-
-**Important note:** The provided models are NOT the original models used to report numbers in the
-[AudioGen publication][audiogen_arxiv]. Refer to the model card to learn more about architectural changes.
-
-Listen to samples from the **original AudioGen implementation** in our [sample page][audiogen_samples].
-
-
-## Model Card
-
-See [the model card](../model_cards/AUDIOGEN_MODEL_CARD.md).
-
-
-## Installation
-
-Please follow the AudioCraft installation instructions from the [README](../README.md).
-
-AudioCraft requires a GPU with at least 16 GB of memory for running inference with the medium-sized models (~1.5B parameters).
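-
-If you are unsure whether your GPU meets this requirement, a quick check along these lines can help (a minimal sketch using PyTorch, which AudioCraft already depends on; it only reports total VRAM and does not account for memory used by other processes):
-
-```python
-import torch
-
-# Rough pre-flight check against the 16 GB requirement stated above.
-if torch.cuda.is_available():
-    props = torch.cuda.get_device_properties(0)
-    print(f"{props.name}: {props.total_memory / 1e9:.1f} GB of VRAM")
-else:
-    print("No CUDA device detected.")
-```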
-
-## API and usage
-
-We provide a simple API and one pre-trained model for AudioGen:
-
-`facebook/audiogen-medium`: 1.5B model, text to sound - [🤗 Hub](https://huggingface.co/facebook/audiogen-medium)
-
-You can play with AudioGen by running the jupyter notebook at [`demos/audiogen_demo.ipynb`](../demos/audiogen_demo.ipynb) locally (if you have a GPU).
-
-A quick example of using the API follows.
-
-```python
-import torchaudio
-from audiocraft.models import AudioGen
-from audiocraft.data.audio import audio_write
-
-model = AudioGen.get_pretrained('facebook/audiogen-medium')
-model.set_generation_params(duration=5) # generate 5 seconds.
-descriptions = ['dog barking', 'siren of an emergency vehicle', 'footsteps in a corridor']
-wav = model.generate(descriptions) # generates 3 samples.
-
-for idx, one_wav in enumerate(wav):
-    # Will save under {idx}.wav, with loudness normalization at -14 dB LUFS.
- audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
-```
-
-## Training
-
-The [AudioGenSolver](../audiocraft/solvers/audiogen.py) implements AudioGen's training pipeline
-used to develop the released model. Note that this may not fully reproduce the results presented in the paper.
-Similarly to MusicGen, it defines an autoregressive language modeling task over multiple streams of
-discrete tokens extracted from a pre-trained EnCodec model (see the [EnCodec documentation](./ENCODEC.md)
-for more details on how to train such a model) with dataset-specific changes for environmental sound
-processing.
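-
-For intuition only, here is a minimal sketch of what an autoregressive language-modeling loss over several parallel streams of discrete EnCodec tokens looks like; it is not the actual `AudioGenSolver` code, and all shapes and names below are made up:
-
-```python
-import torch
-import torch.nn.functional as F
-
-# Hypothetical shapes: B sequences, K=4 codebooks, T frames at 50 Hz, codebook size `card`.
-B, K, T, card = 2, 4, 250, 2048
-logits = torch.randn(B, K, T, card)          # per-codebook next-token predictions from the LM
-targets = torch.randint(0, card, (B, K, T))  # discrete EnCodec token ids
-
-# Cross-entropy averaged over all codebooks and timesteps.
-loss = F.cross_entropy(logits.reshape(-1, card), targets.reshape(-1))
-print(float(loss))
-```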
-
-Note that **we do NOT provide any of the datasets** used for training AudioGen.
-
-### Example configurations and grids
-
-We provide configurations to reproduce the released models and our research.
-AudioGen solver configurations are available in [config/solver/audiogen](../config/solver/audiogen).
-The base training configuration used for the released models is the following:
-[`solver=audiogen/audiogen_base_16khz`](../config/solver/audiogen/audiogen_base_16khz.yaml)
-
-Please find some example grids to train AudioGen at
-[audiocraft/grids/audiogen](../audiocraft/grids/audiogen/).
-
-```shell
-# text-to-sound
-dora grid audiogen.audiogen_base_16khz
-```
-
-### Sound dataset and metadata
-
-AudioGen's underlying dataset is an AudioDataset augmented with description metadata.
-The AudioGen dataset implementation expects the metadata to be available as `.json` files
-at the same location as the audio files or through a specified external folder.
-Learn more in the [datasets section](./DATASETS.md).
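-
-As an illustration only, a metadata file could be written next to its audio file along these lines; the `description` field name is an assumption here, and the exact schema expected by the dataset implementation is documented in the datasets section:
-
-```python
-import json
-
-# Hypothetical example; check the datasets documentation for the actual required fields.
-meta = {"description": "dog barking in a busy street"}
-with open("dog_bark_001.json", "w") as f:  # placed next to dog_bark_001.wav
-    json.dump(meta, f)
-```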
-
-### Evaluation stage
-
-By default, the evaluation stage also computes the cross-entropy and the perplexity over the
-evaluation dataset. The objective metrics used for evaluation, however, can be costly to run
-or require extra dependencies. Please refer to the [metrics documentation](./METRICS.md)
-for more details on the requirements for each metric.
-
-We provide an off-the-shelf configuration to enable running the objective metrics
-for audio generation in
-[config/solver/audiogen/evaluation/objective_eval](../config/solver/audiogen/evaluation/objective_eval.yaml).
-
-One can then activate these evaluations as follows:
-```shell
-# using the configuration
-dora run solver=audiogen/debug solver/audiogen/evaluation=objective_eval
-# specifying each of the fields, e.g. to activate KL computation
-dora run solver=audiogen/debug evaluate.metrics.kld=true
-```
-
-See [an example evaluation grid](../audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py).
-
-### Generation stage
-
-The generation stage allows generating samples conditionally and/or unconditionally and performing
-audio continuation (from a prompt). We currently support greedy sampling (argmax), sampling
-from softmax with a given temperature, and top-K and top-P (nucleus) sampling (a minimal sketch of these strategies follows the example below). The number of samples
-generated and the batch size used are controlled by the `dataset.generate` configuration
-while the other generation parameters are defined in `generate.lm`.
-
-```shell
-# control sampling parameters
-dora run solver=audiogen/debug generate.lm.gen_duration=5 generate.lm.use_sampling=true generate.lm.top_k=15
-```
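-
-For readers unfamiliar with these strategies, below is a minimal, self-contained sketch of top-K followed by top-P (nucleus) filtering over a batch of logits. It is illustrative only and not AudioCraft's implementation; the codebook size and the default values of `k` and `p` are arbitrary:
-
-```python
-import torch
-
-def sample_top_k_top_p(logits, k=15, p=0.9, temperature=1.0):
-    """Sample one token per row after top-k and top-p (nucleus) filtering."""
-    probs = torch.softmax(logits / temperature, dim=-1)
-    # top-k: keep only the k most likely tokens (returned sorted, descending)
-    topk_probs, topk_idx = probs.topk(k, dim=-1)
-    # top-p: keep the smallest prefix whose cumulative mass reaches p
-    cum = topk_probs.cumsum(dim=-1)
-    keep = (cum - topk_probs) < p               # always keeps at least the first token
-    filtered = topk_probs * keep.float()
-    filtered = filtered / filtered.sum(dim=-1, keepdim=True)
-    choice = torch.multinomial(filtered, num_samples=1)
-    return topk_idx.gather(-1, choice)          # map back to vocabulary indices
-
-logits = torch.randn(2, 2048)                   # hypothetical LM logits over a 2048-entry codebook
-print(sample_top_k_top_p(logits).shape)         # torch.Size([2, 1])
-```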
-
-## More information
-
-Refer to [MusicGen's instructions](./MUSICGEN.md).
-
-### Learn more
-
-Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md).
-
-
-## Citation
-
-AudioGen
-```
-@article{kreuk2022audiogen,
- title={Audiogen: Textually guided audio generation},
- author={Kreuk, Felix and Synnaeve, Gabriel and Polyak, Adam and Singer, Uriel and D{\'e}fossez, Alexandre and Copet, Jade and Parikh, Devi and Taigman, Yaniv and Adi, Yossi},
- journal={arXiv preprint arXiv:2209.15352},
- year={2022}
-}
-```
-
-MusicGen
-```
-@article{copet2023simple,
- title={Simple and Controllable Music Generation},
- author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez},
- year={2023},
- journal={arXiv preprint arXiv:2306.05284},
-}
-```
-
-## License
-
-See license information in the [model card](../model_cards/AUDIOGEN_MODEL_CARD.md).
-
-[audiogen_arxiv]: https://arxiv.org/abs/2209.15352
-[musicgen_arxiv]: https://arxiv.org/abs/2306.05284
-[audiogen_samples]: https://felixkreuk.github.io/audiogen/
diff --git a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio_inpaint.py b/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio_inpaint.py
deleted file mode 100644
index 1541a74cd3082d8b44ba7a7988aeb65c2dd84a24..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/diffusion/ddpm_audio_inpaint.py
+++ /dev/null
@@ -1,1081 +0,0 @@
-"""
-wild mixture of
-https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
-https://github.com/CompVis/taming-transformers
--- merci
-"""
-import os
-import torch
-import torch.nn as nn
-import numpy as np
-import pytorch_lightning as pl
-from torch.optim.lr_scheduler import LambdaLR
-from einops import rearrange, repeat
-from contextlib import contextmanager
-from functools import partial
-from tqdm import tqdm
-from torchvision.utils import make_grid
-from pytorch_lightning.utilities.distributed import rank_zero_only
-
-from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
-from ldm.modules.ema import LitEma
-from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
-from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
-from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
-from ldm.models.diffusion.ddim import DDIMSampler
-from ldm.models.diffusion.ddpm import DDPM, disabled_train
-
-__conditioning_keys__ = {'concat': 'c_concat',
- 'crossattn': 'c_crossattn',
- 'adm': 'y'}
-
-# add mel_dim and mel_length params to ensure correct shape
-class LatentDiffusion_audioinpaint(DDPM):
- """main class"""
- def __init__(self,
- first_stage_config,
- cond_stage_config,
- num_timesteps_cond=None,
- mel_dim=80,
- mel_length=848,
- cond_stage_key="image",
- cond_stage_trainable=False,
- concat_mode=True,
- cond_stage_forward=None,
- conditioning_key=None,
- scale_factor=1.0,
- scale_by_std=False,
- test_repeat=1,
- test_numsteps = None,
- *args, **kwargs):
- self.num_timesteps_cond = default(num_timesteps_cond, 1)
- self.scale_by_std = scale_by_std
- assert self.num_timesteps_cond <= kwargs['timesteps']
- # for backwards compatibility after implementation of DiffusionWrapper
- if conditioning_key is None:
- conditioning_key = 'concat' if concat_mode else 'crossattn'
- if cond_stage_config == '__is_unconditional__':
- conditioning_key = None
- ckpt_path = kwargs.pop("ckpt_path", None)
- ignore_keys = kwargs.pop("ignore_keys", [])
- super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
- self.test_repeat = test_repeat
-        # fall back to the full number of diffusion steps when test_numsteps is not given
-        self.test_numsteps = default(test_numsteps, self.num_timesteps)
- self.concat_mode = concat_mode
- self.mel_dim = mel_dim
- self.mel_length = mel_length
- self.cond_stage_trainable = cond_stage_trainable
- self.cond_stage_key = cond_stage_key
- try:
- self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
- except:
- self.num_downs = 0
- if not scale_by_std:
- self.scale_factor = scale_factor
- else:
- self.register_buffer('scale_factor', torch.tensor(scale_factor))
- self.instantiate_first_stage(first_stage_config)
- self.instantiate_cond_stage(cond_stage_config)
- self.cond_stage_forward = cond_stage_forward
- self.clip_denoised = False
- self.bbox_tokenizer = None
-
- self.restarted_from_ckpt = False
- if ckpt_path is not None:
- self.init_from_ckpt(ckpt_path, ignore_keys)
- self.restarted_from_ckpt = True
-
- def make_cond_schedule(self, ):
- self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
- ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
- self.cond_ids[:self.num_timesteps_cond] = ids
-
- @rank_zero_only
- @torch.no_grad()
- def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
- # only for very first batch
- if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
- assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
- # set rescale weight to 1./std of encodings
- print("### USING STD-RESCALING ###")
- x = super().get_input(batch, self.first_stage_key)
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
- del self.scale_factor
- self.register_buffer('scale_factor', 1. / z.flatten().std())
- print(f"setting self.scale_factor to {self.scale_factor}")
- print("### USING STD-RESCALING ###")
-
- def register_schedule(self,
- given_betas=None, beta_schedule="linear", timesteps=1000,
- linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
-
- self.shorten_cond_schedule = self.num_timesteps_cond > 1
- if self.shorten_cond_schedule:
- self.make_cond_schedule()
-
- def instantiate_first_stage(self, config):
- model = instantiate_from_config(config)
- self.first_stage_model = model.eval()
- self.first_stage_model.train = disabled_train
- for param in self.first_stage_model.parameters():
- param.requires_grad = False
-
- def instantiate_cond_stage(self, config):
- if not self.cond_stage_trainable:
- if config == "__is_first_stage__":# for no_text inpainting task
- print("Using first stage also as cond stage.")
- self.cond_stage_model = self.first_stage_model
- elif config == "__is_unconditional__":# for unconditional image generation such as human face、ImageNet
- print(f"Training {self.__class__.__name__} as an unconditional model.")
- self.cond_stage_model = None
- # self.be_unconditional = True
- else:
- model = instantiate_from_config(config)
- self.cond_stage_model = model.eval()
- self.cond_stage_model.train = disabled_train
- for param in self.cond_stage_model.parameters():
- param.requires_grad = False
- else:
- assert config != '__is_first_stage__'
- assert config != '__is_unconditional__'
- model = instantiate_from_config(config)
- self.cond_stage_model = model
-
- def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
- denoise_row = []
- for zd in tqdm(samples, desc=desc):
- denoise_row.append(self.decode_first_stage(zd.to(self.device),
- force_not_quantize=force_no_decoder_quantization))
- n_imgs_per_row = len(denoise_row)
- denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
- denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
- denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
- denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
- return denoise_grid
-
- def get_first_stage_encoding(self, encoder_posterior):# encode_emb from autoencoder
- if isinstance(encoder_posterior, DiagonalGaussianDistribution):
- z = encoder_posterior.sample()
- elif isinstance(encoder_posterior, torch.Tensor):
- z = encoder_posterior
- else:
- raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
- return self.scale_factor * z
-
- def get_learned_conditioning(self, c):
- if self.cond_stage_forward is None:
- if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
- c = self.cond_stage_model.encode(c)
- if isinstance(c, DiagonalGaussianDistribution):
- c = c.mode()
- else:
- c = self.cond_stage_model(c)
- else:
- assert hasattr(self.cond_stage_model, self.cond_stage_forward)
- c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
- return c
-
- def meshgrid(self, h, w):
- y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
- x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
-
- arr = torch.cat([y, x], dim=-1)
- return arr
-
- def delta_border(self, h, w):
- """
- :param h: height
- :param w: width
- :return: normalized distance to image border,
-        with min distance = 0 at border and max dist = 0.5 at image center
- """
- lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
- arr = self.meshgrid(h, w) / lower_right_corner
- dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
- dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
- edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
- return edge_dist
-
- def get_weighting(self, h, w, Ly, Lx, device):
- weighting = self.delta_border(h, w)
- weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
- self.split_input_params["clip_max_weight"], )
- weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
-
- if self.split_input_params["tie_braker"]:
- L_weighting = self.delta_border(Ly, Lx)
- L_weighting = torch.clip(L_weighting,
- self.split_input_params["clip_min_tie_weight"],
- self.split_input_params["clip_max_tie_weight"])
-
- L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
- weighting = weighting * L_weighting
- return weighting
-
- def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
- """
- :param x: img of size (bs, c, h, w)
- :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
- """
- bs, nc, h, w = x.shape
-
- # number of crops in image
- Ly = (h - kernel_size[0]) // stride[0] + 1
- Lx = (w - kernel_size[1]) // stride[1] + 1
-
- if uf == 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
-
- weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
-
- elif uf > 1 and df == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
- dilation=1, padding=0,
- stride=(stride[0] * uf, stride[1] * uf))
- fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
-
- elif df > 1 and uf == 1:
- fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
- unfold = torch.nn.Unfold(**fold_params)
-
- fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
- dilation=1, padding=0,
- stride=(stride[0] // df, stride[1] // df))
- fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
-
- weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
- normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
- weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
-
- else:
- raise NotImplementedError
-
- return fold, unfold, normalization, weighting
-
- @torch.no_grad()
- def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
- cond_key=None, return_original_cond=False, bs=None):
- x = super().get_input(batch, k)
- if bs is not None:
- x = x[:bs]
- x = x.to(self.device)
- encoder_posterior = self.encode_first_stage(x)
- z = self.get_first_stage_encoding(encoder_posterior).detach()
-
-        if self.model.conditioning_key is not None:  # 'crossattn' for txt2image, 'hybrid' for txt_inpaint
- if cond_key is None:
- cond_key = self.cond_stage_key # 'caption' for txt_inpaint
- if self.model.conditioning_key == 'hybrid':
- xc = {}
- assert cond_key == 'caption' # only txt_inpaint is implemented now
- assert 'masked_image' in batch.keys()
- assert 'mask' in batch.keys()
- masked_image = super().get_input(batch,'masked_image')
- mask = super().get_input(batch,'mask')
- if bs is not None:
- masked_image,mask = masked_image[:bs],mask[:bs]
- masked_image,mask = masked_image.to(self.device),mask.to(self.device)
- masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach()
- resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:])
- xc['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1)
- xc[cond_key] = batch[cond_key]
- else:
- if cond_key != self.first_stage_key:
- if cond_key in ['caption', 'coordinates_bbox']:
- xc = batch[cond_key]
- elif cond_key == 'class_label':
- xc = batch
- else:
- xc = super().get_input(batch, cond_key).to(self.device)
- else:# cond_key == 'image'
- xc = x
-            if not self.cond_stage_trainable or force_c_encode:  # cond_stage_trainable is True for txt2img; force_c_encode=True when called from log_images
-                if isinstance(xc, list):
-                    # import pudb; pudb.set_trace()
-                    # log_images calls sample_log next, so the processed conditioning c must be prepared here
-                    c = self.get_learned_conditioning(xc)
-                elif isinstance(xc, dict):
-                    c = {}
-                    c['c_concat'] = xc['c_concat']
-                    c['c_crossattn'] = self.get_learned_conditioning(xc[cond_key])
-                else:
-                    c = self.get_learned_conditioning(xc.to(self.device))
- else:
- c = xc
- if bs is not None:
- if isinstance(c,dict):
- for k in c.keys():
- c[k] = c[k][:bs]
- else:
- c = c[:bs]
-
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- ckey = __conditioning_keys__[self.model.conditioning_key]
- c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
-
- else:
- c = None
- xc = None
- if self.use_positional_encodings:
- pos_x, pos_y = self.compute_latent_shifts(batch)
- c = {'pos_x': pos_x, 'pos_y': pos_y}
- out = [z, c]
- if return_first_stage_outputs:
- xrec = self.decode_first_stage(z)
- out.extend([x, xrec])
- if return_original_cond:
- out.append(xc)
- return out
-
- @torch.no_grad()
- def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
- if predict_cids:
- if z.dim() == 4:
- z = torch.argmax(z.exp(), dim=1).long()
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
- z = 1. / self.scale_factor * z
-
- if hasattr(self, "split_input_params"):
- if self.split_input_params["patch_distributed_vq"]:
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
- uf = self.split_input_params["vqf"]
- bs, nc, h, w = z.shape
- if ks[0] > h or ks[1] > w:
- ks = (min(ks[0], h), min(ks[1], w))
- print("reducing Kernel")
-
- if stride[0] > h or stride[1] > w:
- stride = (min(stride[0], h), min(stride[1], w))
- print("reducing stride")
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
-
- z = unfold(z) # (bn, nc * prod(**ks), L)
- # 1. Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- # 2. apply model loop over last dim
- if isinstance(self.first_stage_model, VQModelInterface):
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
- force_not_quantize=predict_cids or force_not_quantize)
- for i in range(z.shape[-1])]
- else:
-
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
- for i in range(z.shape[-1])]
-
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
- o = o * weighting
- # Reverse 1. reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- decoded = fold(o)
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
- return decoded
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- # same as above but without decorator
- def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
- if predict_cids:
- if z.dim() == 4:
- z = torch.argmax(z.exp(), dim=1).long()
- z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
- z = rearrange(z, 'b h w c -> b c h w').contiguous()
-
- z = 1. / self.scale_factor * z
-
- if hasattr(self, "split_input_params"):
- if self.split_input_params["patch_distributed_vq"]:
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
- uf = self.split_input_params["vqf"]
- bs, nc, h, w = z.shape
- if ks[0] > h or ks[1] > w:
- ks = (min(ks[0], h), min(ks[1], w))
- print("reducing Kernel")
-
- if stride[0] > h or stride[1] > w:
- stride = (min(stride[0], h), min(stride[1], w))
- print("reducing stride")
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
-
- z = unfold(z) # (bn, nc * prod(**ks), L)
- # 1. Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- # 2. apply model loop over last dim
- if isinstance(self.first_stage_model, VQModelInterface):
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
- force_not_quantize=predict_cids or force_not_quantize)
- for i in range(z.shape[-1])]
- else:
-
- output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
- for i in range(z.shape[-1])]
-
- o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
- o = o * weighting
- # Reverse 1. reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- decoded = fold(o)
- decoded = decoded / normalization # norm is shape (1, 1, h, w)
- return decoded
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- else:
- if isinstance(self.first_stage_model, VQModelInterface):
- return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
- else:
- return self.first_stage_model.decode(z)
-
- @torch.no_grad()
- def encode_first_stage(self, x):
- if hasattr(self, "split_input_params"):
- if self.split_input_params["patch_distributed_vq"]:
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
- df = self.split_input_params["vqf"]
- self.split_input_params['original_image_size'] = x.shape[-2:]
- bs, nc, h, w = x.shape
- if ks[0] > h or ks[1] > w:
- ks = (min(ks[0], h), min(ks[1], w))
- print("reducing Kernel")
-
- if stride[0] > h or stride[1] > w:
- stride = (min(stride[0], h), min(stride[1], w))
- print("reducing stride")
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
- z = unfold(x) # (bn, nc * prod(**ks), L)
- # Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
- for i in range(z.shape[-1])]
-
- o = torch.stack(output_list, axis=-1)
- o = o * weighting
-
- # Reverse reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- decoded = fold(o)
- decoded = decoded / normalization
- return decoded
-
- else:
- return self.first_stage_model.encode(x)
- else:
- return self.first_stage_model.encode(x)
-
- def shared_step(self, batch, **kwargs):
- x, c = self.get_input(batch, self.first_stage_key)# get latent and condition
- loss = self(x, c)
- return loss
-
- def test_step(self,batch,batch_idx):
- # TODO make self.test_repeat work
- cond = {}
- cond[self.cond_stage_key] = batch[self.cond_stage_key]
- cond[self.cond_stage_key] = self.get_learned_conditioning(cond[self.cond_stage_key]) # c: string -> [B, T, Context_dim]
- cond['c_crossattn'] = cond.pop(self.cond_stage_key)
- masked_image = super().get_input(batch,'masked_image')
- mask = super().get_input(batch,'mask')
- masked_image,mask = masked_image.to(self.device),mask.to(self.device)
- masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach()
- resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:])
- cond['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1)
- batch_size = len(batch[self.cond_stage_key])
- # shape = [batch_size,self.channels,self.mel_dim,self.mel_length]
- enc_emb = self.sample(cond,batch_size,timesteps=self.test_numsteps)
- xrec = self.decode_first_stage(enc_emb)
- reconstructions = (xrec + 1)/2 # to mel scale
- test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
- savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
- if not os.path.exists(savedir):
- os.makedirs(savedir)
-
- file_names = batch['f_name']
- nfiles = len(file_names)
-        reconstructions = reconstructions.cpu().numpy().squeeze(1)  # squeeze channel dim
- for k in range(reconstructions.shape[0]):
- b,repeat = k % nfiles, k // nfiles
- vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
- v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
-            save_img_path = os.path.join(savedir, f'{v_n}_sample_{num}_{repeat}.npy')  # the num-th caption, the repeat-th repetition
- np.save(save_img_path,reconstructions[b])
-
- return None
-
- def forward(self, x, c, *args, **kwargs):
- t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
- if self.model.conditioning_key is not None:
- assert c is not None
- if self.cond_stage_trainable:
- if isinstance(c,dict):
- c[self.cond_stage_key] = self.get_learned_conditioning(c[self.cond_stage_key])
- c['c_crossattn'] = c.pop(self.cond_stage_key)
- else:
- c = self.get_learned_conditioning(c) # c: string -> [B, T, Context_dim]
- if self.shorten_cond_schedule: # TODO: drop this option
- tc = self.cond_ids[t].to(self.device)
- c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
- return self.p_losses(x, c, t, *args, **kwargs)
-
- def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
- def rescale_bbox(bbox):
- x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
- y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
- w = min(bbox[2] / crop_coordinates[2], 1 - x0)
- h = min(bbox[3] / crop_coordinates[3], 1 - y0)
- return x0, y0, w, h
-
- return [rescale_bbox(b) for b in bboxes]
-
- def apply_model(self, x_noisy, t, cond, return_ids=False):
-        # wrap values in lists to enable the concat operation below
-        if isinstance(cond, dict):
-            # hybrid case: cond is expected to be a dict (txt2inpaint)
- cond_tmp = {}# use cond_tmp to avoid inplace edit
- for k,v in cond.items():
- if not isinstance(v, list):
- cond_tmp[k] = [cond[k]]
- else:
- cond_tmp[k] = cond[k]
- cond = cond_tmp
- else:
- if not isinstance(cond, list):
- cond = [cond]
- key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
- cond = {key: cond}
-
- if hasattr(self, "split_input_params"):
- assert len(cond) == 1 # todo can only deal with one conditioning atm
- assert not return_ids
- ks = self.split_input_params["ks"] # eg. (128, 128)
- stride = self.split_input_params["stride"] # eg. (64, 64)
-
- h, w = x_noisy.shape[-2:]
-
- fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
-
- z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
- # Reshape to img shape
- z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
- z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
-
- if self.cond_stage_key in ["image", "LR_image", "segmentation",
- 'bbox_img'] and self.model.conditioning_key: # todo check for completeness
- c_key = next(iter(cond.keys())) # get key
- c = next(iter(cond.values())) # get value
- assert (len(c) == 1) # todo extend to list with more than one elem
- c = c[0] # get element
-
- c = unfold(c)
- c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
-
- cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
-
- elif self.cond_stage_key == 'coordinates_bbox':
-                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
-
- # assuming padding of unfold is always 0 and its dilation is always 1
- n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
- full_img_h, full_img_w = self.split_input_params['original_image_size']
- # as we are operating on latents, we need the factor from the original image size to the
- # spatial latent size to properly rescale the crops for regenerating the bbox annotations
- num_downs = self.first_stage_model.encoder.num_resolutions - 1
- rescale_latent = 2 ** (num_downs)
-
-                # get top-left positions of patches as expected by the bbox tokenizer; therefore we
-                # need to rescale the tl patch coordinates to be in between (0, 1)
- tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
- rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
- for patch_nr in range(z.shape[-1])]
-
- # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
- patch_limits = [(x_tl, y_tl,
- rescale_latent * ks[0] / full_img_w,
- rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
- # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
-
- # tokenize crop coordinates for the bounding boxes of the respective patches
- patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
- for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
- print(patch_limits_tknzd[0].shape)
- # cut tknzd crop position from conditioning
- assert isinstance(cond, dict), 'cond must be dict to be fed into model'
- cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
- print(cut_cond.shape)
-
- adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
- adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
- print(adapted_cond.shape)
- adapted_cond = self.get_learned_conditioning(adapted_cond)
- print(adapted_cond.shape)
- adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
- print(adapted_cond.shape)
-
- cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
-
- else:
- cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
-
- # apply model by loop over crops
- output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
- assert not isinstance(output_list[0],
- tuple) # todo cant deal with multiple model outputs check this never happens
-
- o = torch.stack(output_list, axis=-1)
- o = o * weighting
- # Reverse reshape to img shape
- o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
- # stitch crops together
- x_recon = fold(o) / normalization
-
- else:
- # x_noisy is tensor with shape [b,c,mel_len,T]
- # if condition is caption ,cond['c_crossattn'] is a list, each item shape is [1, 77, 1280]
- x_recon = self.model(x_noisy, t, **cond)# tensor with shape [b,c,mel_len,T]
-
- if isinstance(x_recon, tuple) and not return_ids:
- return x_recon[0]
- else:
- return x_recon
-
- def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
- return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
- extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
-
- def _prior_bpd(self, x_start):
- """
- Get the prior KL term for the variational lower-bound, measured in
- bits-per-dim.
- This term can't be optimized, as it only depends on the encoder.
- :param x_start: the [N x C x ...] tensor of inputs.
- :return: a batch of [N] KL values (in bits), one per batch element.
- """
- batch_size = x_start.shape[0]
- t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
- qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
- kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
- return mean_flat(kl_prior) / np.log(2.0)
-
- def p_losses(self, x_start, cond, t, noise=None):
- noise = default(noise, lambda: torch.randn_like(x_start))
- x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
- model_output = self.apply_model(x_noisy, t, cond)
-
- loss_dict = {}
- prefix = 'train' if self.training else 'val'
-
- if self.parameterization == "x0":
- target = x_start
- elif self.parameterization == "eps":
- target = noise
- else:
- raise NotImplementedError()
-
- loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
- loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
-
- logvar_t = self.logvar[t].to(self.device)
- loss = loss_simple / torch.exp(logvar_t) + logvar_t
- # loss = loss_simple / torch.exp(self.logvar) + self.logvar
- if self.learn_logvar:
- loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
- loss_dict.update({'logvar': self.logvar.data.mean()})
-
- loss = self.l_simple_weight * loss.mean()
-
- loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
- loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
- loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
- loss += (self.original_elbo_weight * loss_vlb)
- loss_dict.update({f'{prefix}/loss': loss})
-
- return loss, loss_dict
-
- def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
- return_x0=False, score_corrector=None, corrector_kwargs=None):
- t_in = t
- model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
-
- if score_corrector is not None:
- assert self.parameterization == "eps"
- model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
-
- if return_codebook_ids:
- model_out, logits = model_out
-
- if self.parameterization == "eps":
- x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
- elif self.parameterization == "x0":
- x_recon = model_out
- else:
- raise NotImplementedError()
-
- if clip_denoised:
- x_recon.clamp_(-1., 1.)
- if quantize_denoised:
- x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
- model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
- if return_codebook_ids:
- return model_mean, posterior_variance, posterior_log_variance, logits
- elif return_x0:
- return model_mean, posterior_variance, posterior_log_variance, x_recon
- else:
- return model_mean, posterior_variance, posterior_log_variance
-
- @torch.no_grad()
- def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
- return_codebook_ids=False, quantize_denoised=False, return_x0=False,
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
- b, *_, device = *x.shape, x.device
- outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
- return_codebook_ids=return_codebook_ids,
- quantize_denoised=quantize_denoised,
- return_x0=return_x0,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if return_codebook_ids:
- raise DeprecationWarning("Support dropped.")
- model_mean, _, model_log_variance, logits = outputs
- elif return_x0:
- model_mean, _, model_log_variance, x0 = outputs
- else:
- model_mean, _, model_log_variance = outputs
-
- noise = noise_like(x.shape, device, repeat_noise) * temperature
- if noise_dropout > 0.:
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
- # no noise when t == 0
- nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
-
- if return_codebook_ids:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
- if return_x0:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
- else:
- return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
-
- @torch.no_grad()
- def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
- img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
- score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
- log_every_t=None):
- if not log_every_t:
- log_every_t = self.log_every_t
- timesteps = self.num_timesteps
- if batch_size is not None:
- b = batch_size if batch_size is not None else shape[0]
- shape = [batch_size] + list(shape)
- else:
- b = batch_size = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=self.device)
- else:
- img = x_T
- intermediates = []
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
- total=timesteps) if verbose else reversed(
- range(0, timesteps))
- if type(temperature) == float:
- temperature = [temperature] * timesteps
-
- for i in iterator:
- ts = torch.full((b,), i, device=self.device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img, x0_partial = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised, return_x0=True,
- temperature=temperature[i], noise_dropout=noise_dropout,
- score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
- if mask is not None:
- assert x0 is not None
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(x0_partial)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
- return img, intermediates
-
- @torch.no_grad()
- def p_sample_loop(self, cond, shape, return_intermediates=False,
- x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, img_callback=None, start_T=None,
- log_every_t=None):
-
- if not log_every_t:
- log_every_t = self.log_every_t
- device = self.betas.device
- b = shape[0]
- if x_T is None:
- img = torch.randn(shape, device=device)
- else:
- img = x_T
-
- intermediates = [img]
- if timesteps is None:
- timesteps = self.num_timesteps
-
- if start_T is not None:
- timesteps = min(timesteps, start_T)
- iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
- range(0, timesteps))
-
- if mask is not None:
- assert x0 is not None
- assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
-
- for i in iterator:
- ts = torch.full((b,), i, device=device, dtype=torch.long)
- if self.shorten_cond_schedule:
- assert self.model.conditioning_key != 'hybrid'
- tc = self.cond_ids[ts].to(cond.device)
- cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
-
- img = self.p_sample(img, cond, ts,
- clip_denoised=self.clip_denoised,
- quantize_denoised=quantize_denoised)
- if mask is not None:
- img_orig = self.q_sample(x0, ts)
- img = img_orig * mask + (1. - mask) * img
-
- if i % log_every_t == 0 or i == timesteps - 1:
- intermediates.append(img)
- if callback: callback(i)
- if img_callback: img_callback(img, i)
-
- if return_intermediates:
- return img, intermediates
- return img
-
- @torch.no_grad()
- def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
- verbose=True, timesteps=None, quantize_denoised=False,
- mask=None, x0=None, shape=None,**kwargs):
- if shape is None:
- shape = (batch_size, self.channels, self.mel_dim, self.mel_length)
- if cond is not None:
- if isinstance(cond, dict):
- cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
- list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
- else:
- cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
- return self.p_sample_loop(cond,
- shape,
- return_intermediates=return_intermediates, x_T=x_T,
- verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
- mask=mask, x0=x0)
-
- @torch.no_grad()
- def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
- if ddim:
- ddim_sampler = DDIMSampler(self)
- shape = (self.channels, self.mel_dim, self.mel_length)
- samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
- shape,cond,verbose=False,**kwargs)
-
- else:
- samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
- return_intermediates=True,**kwargs)
-
- return samples, intermediates
-
- @torch.no_grad()
- def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
- quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
- plot_diffusion_rows=True, **kwargs):
-
- use_ddim = ddim_steps is not None
-
- log = dict()
- z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
- return_first_stage_outputs=True,
- force_c_encode=True,
- return_original_cond=True,
- bs=N)
-
- N = min(x.shape[0], N)
- n_row = min(x.shape[0], n_row)
- log["inputs"] = x # 原始输入图像
- log["reconstruction"] = xrec # 重建得到的图像
- if self.model.conditioning_key is not None:
- if hasattr(self.cond_stage_model, "decode"):# when cond_stage is first_stage. (bert embedder doesnot have decode)
- xc = self.cond_stage_model.decode(c)# decoded masked image
- log["conditioning"] = xc # 重建后的图像
- elif self.cond_stage_key in ["caption"]:
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
- log["conditioning"] = xc # 含有文本的图像
- if self.model.conditioning_key == 'hybrid':
- log["decoded_maskedimg"] = self.first_stage_model.decode(c['c_concat'][:,:self.first_stage_model.embed_dim])# c_concat is the concat result of masked_img latent and resized mask. get latent here to decode
- elif self.cond_stage_key == 'class_label':
- xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
-                log['conditioning'] = xc  # image rendering the class-label text
- elif isimage(xc):
- log["conditioning"] = xc
- if ismap(xc):
- log["original_conditioning"] = self.to_rgb(xc)
-
-        if plot_diffusion_rows:  # log the image at each diffusion step
- # get diffusion row
- diffusion_row = list()
- z_start = z[:n_row]
- for t in range(self.num_timesteps):
- if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
- t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
- t = t.to(self.device).long()
- noise = torch.randn_like(z_start)
- z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
- diffusion_row.append(self.decode_first_stage(z_noisy))
-
- diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
- diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
- diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
- diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
- log["diffusion_row"] = diffusion_grid
-
- if sample:#
- # get denoise row
- with self.ema_scope("Plotting"):
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
- ddim_steps=ddim_steps,eta=ddim_eta)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
- x_samples = self.decode_first_stage(samples)
- log["samples"] = x_samples
- if plot_denoise_rows:
- denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
- log["denoise_row"] = denoise_grid
-
- if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
- self.first_stage_model, IdentityFirstStage):
- # also display when quantizing x0 while sampling
- with self.ema_scope("Plotting Quantized Denoised"):
- samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
- ddim_steps=ddim_steps,eta=ddim_eta,
- quantize_denoised=True)
- # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
- # quantize_denoised=True)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_x0_quantized"] = x_samples
-
- if inpaint:
- # make a simple center square
- b, h, w = z.shape[0], z.shape[2], z.shape[3]
- mask = torch.ones(N, h, w).to(self.device)
- # zeros will be filled in
- mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
- mask = mask[:, None, ...]# N,1,H,W
- with self.ema_scope("Plotting Inpaint"):
- samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_inpainting"] = x_samples
- log["mask"] = mask
-
- # outpaint
- with self.ema_scope("Plotting Outpaint"):
- samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
- ddim_steps=ddim_steps, x0=z[:N], mask=mask)
- x_samples = self.decode_first_stage(samples.to(self.device))
- log["samples_outpainting"] = x_samples
-
- if plot_progressive_rows:
- with self.ema_scope("Plotting Progressives"):
- img, progressives = self.progressive_denoising(c,
- shape=(self.channels, self.mel_dim, self.mel_length),
- batch_size=N)
- prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
- log["progressive_row"] = prog_row
-
- if return_keys:
- if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
- return log
- else:
- return {key: log[key] for key in return_keys}
- return log
-
- def configure_optimizers(self):
- lr = self.learning_rate
- params = list(self.model.parameters())
- if self.cond_stage_trainable:
- print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
- params = params + list(self.cond_stage_model.parameters())
- if self.learn_logvar:
- print('Diffusion model optimizing logvar')
- params.append(self.logvar)
- opt = torch.optim.AdamW(params, lr=lr)
- if self.use_scheduler:
- assert 'target' in self.scheduler_config
- scheduler = instantiate_from_config(self.scheduler_config)
-
- print("Setting up LambdaLR scheduler...")
- scheduler = [
- {
- 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
- 'interval': 'step',
- 'frequency': 1
- }]
- return [opt], scheduler
- return opt
-
- @torch.no_grad()
- def to_rgb(self, x):
- x = x.float()
- if not hasattr(self, "colorize"):
- self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
- x = nn.functional.conv2d(x, weight=self.colorize)
- x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
- return x
-
diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/logout/+page.server.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/logout/+page.server.ts
deleted file mode 100644
index 1d60b6c5d8df28981da4d06d5ea58eeeaf838b47..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/logout/+page.server.ts
+++ /dev/null
@@ -1,17 +0,0 @@
-import { dev } from "$app/environment";
-import { base } from "$app/paths";
-import { COOKIE_NAME } from "$env/static/private";
-import { redirect } from "@sveltejs/kit";
-
-export const actions = {
- default: async function ({ cookies }) {
- cookies.delete(COOKIE_NAME, {
- path: "/",
- // So that it works inside the space's iframe
- sameSite: dev ? "lax" : "none",
- secure: !dev,
- httpOnly: true,
- });
- throw redirect(303, `${base}/`);
- },
-};
diff --git a/spaces/Adr740/Hadith_AI_Explorer/data.py b/spaces/Adr740/Hadith_AI_Explorer/data.py
deleted file mode 100644
index 136bd1e5e925c8936cb792ae2bce1dc830308dff..0000000000000000000000000000000000000000
--- a/spaces/Adr740/Hadith_AI_Explorer/data.py
+++ /dev/null
@@ -1,2 +0,0 @@
-import pandas as pd
-data = pd.read_pickle("pickle_ebd.pkl")
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PreLayout.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PreLayout.js
deleted file mode 100644
index 6869558720041323d5bc41a1ac7e90f51c10468f..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/PreLayout.js
+++ /dev/null
@@ -1,15 +0,0 @@
-var PreLayout = function () {
- this._childrenWidth = undefined;
- this._childrenHeight = undefined;
-
- var children = this.getChildrenSizers(),
- child;
- for (var i = 0, cnt = children.length; i < cnt; i++) {
- child = children[i];
- if (child.ignoreLayout) {
- continue;
- }
- child.preLayout();
- }
-}
-export default PreLayout;
\ No newline at end of file
diff --git a/spaces/Ailexcoder/GPT4ALL1/app.py b/spaces/Ailexcoder/GPT4ALL1/app.py
deleted file mode 100644
index 30287cab6d0c1aa31c6361afa856f9b231210488..0000000000000000000000000000000000000000
--- a/spaces/Ailexcoder/GPT4ALL1/app.py
+++ /dev/null
@@ -1,143 +0,0 @@
-from __future__ import annotations
-from typing import Iterable
-import gradio as gr
-from gradio.themes.base import Base
-from gradio.themes.utils import colors, fonts, sizes
-
-from llama_cpp import Llama
-from huggingface_hub import hf_hub_download
-
-hf_hub_download(repo_id="LLukas22/gpt4all-lora-quantized-ggjt", filename="ggjt-model.bin", local_dir=".")
-llm = Llama(model_path="./ggjt-model.bin", n_threads=2)
-
-
-ins = '''### Instruction:
-{}
-### Response:
-'''
-
-theme = gr.themes.Monochrome(
- primary_hue="indigo",
- secondary_hue="blue",
- neutral_hue="slate",
- radius_size=gr.themes.sizes.radius_sm,
- font=[gr.themes.GoogleFont("Open Sans"), "ui-sans-serif", "system-ui", "sans-serif"],
-)
-
-
-
-
-# def generate(instruction):
-# response = llm(ins.format(instruction))
-# response = response['choices'][0]['text']
-# result = ""
-# for word in response.split(" "):
-# result += word + " "
-# yield result
-
-def generate(instruction):
- result = ""
- for x in llm(ins.format(instruction), stop=['### Instruction:', '### End'], stream=True):
- result += x['choices'][0]['text']
- yield result
-
-
-examples = [
- "Instead of making a peanut butter and jelly sandwich, what else could I combine peanut butter with in a sandwich? Give five ideas",
- "How do I make a campfire?",
- "Explain to me the difference between nuclear fission and fusion.",
- "I'm selling my Nikon D-750, write a short blurb for my ad."
-]
-
-def process_example(args):
- for x in generate(args):
- pass
- return x
-
-css = ".generating {visibility: hidden}"
-
-# Based on the gradio theming guide and borrowed from https://huggingface.co/spaces/shivi/dolly-v2-demo
-class SeafoamCustom(Base):
- def __init__(
- self,
- *,
- primary_hue: colors.Color | str = colors.emerald,
- secondary_hue: colors.Color | str = colors.blue,
- neutral_hue: colors.Color | str = colors.blue,
- spacing_size: sizes.Size | str = sizes.spacing_md,
- radius_size: sizes.Size | str = sizes.radius_md,
- font: fonts.Font
- | str
- | Iterable[fonts.Font | str] = (
- fonts.GoogleFont("Quicksand"),
- "ui-sans-serif",
- "sans-serif",
- ),
- font_mono: fonts.Font
- | str
- | Iterable[fonts.Font | str] = (
- fonts.GoogleFont("IBM Plex Mono"),
- "ui-monospace",
- "monospace",
- ),
- ):
- super().__init__(
- primary_hue=primary_hue,
- secondary_hue=secondary_hue,
- neutral_hue=neutral_hue,
- spacing_size=spacing_size,
- radius_size=radius_size,
- font=font,
- font_mono=font_mono,
- )
- super().set(
- button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
- button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
- button_primary_text_color="white",
- button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
- block_shadow="*shadow_drop_lg",
- button_shadow="*shadow_drop_lg",
- input_background_fill="zinc",
- input_border_color="*secondary_300",
- input_shadow="*shadow_drop",
- input_shadow_focus="*shadow_drop_lg",
- )
-
-
-seafoam = SeafoamCustom()
-
-
-with gr.Blocks(theme=seafoam, analytics_enabled=False, css=css) as demo:
- with gr.Column():
- gr.Markdown(
- """ ## GPT4ALL
-
-     An ecosystem of open-source chatbots trained on a massive collection of clean assistant data, including code, stories, and dialogue
-
- Type in the box below and click the button to generate answers to your most pressing questions!
-
- """
- )
-
- with gr.Row():
- with gr.Column(scale=3):
- instruction = gr.Textbox(placeholder="Enter your question here", label="Question", elem_id="q-input")
-
- with gr.Box():
- gr.Markdown("**Answer**")
- output = gr.Markdown(elem_id="q-output")
- submit = gr.Button("Generate", variant="primary")
- gr.Examples(
- examples=examples,
- inputs=[instruction],
- cache_examples=True,
- fn=process_example,
- outputs=[output],
- )
-
-
-
- submit.click(generate, inputs=[instruction], outputs=[output])
- instruction.submit(generate, inputs=[instruction], outputs=[output])
-
-demo.queue(concurrency_count=1).launch(debug=True)
\ No newline at end of file
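
The `generate` function in the app above streams partial completions by iterating over the generator that `llama_cpp` returns when `stream=True`, re-yielding the accumulated text so Gradio can update the output box incrementally. A stripped-down sketch of that loop (model path and prompt template taken from the app, the Gradio wiring omitted):

```python
from llama_cpp import Llama

# Same model file and prompt template as the app above.
llm = Llama(model_path="./ggjt-model.bin", n_threads=2)
PROMPT = "### Instruction:\n{}\n### Response:\n"

def stream_answer(question: str):
    partial = ""
    # stream=True yields incremental chunks instead of one final completion.
    for chunk in llm(PROMPT.format(question), stop=["### Instruction:", "### End"], stream=True):
        partial += chunk["choices"][0]["text"]
        yield partial  # each yield re-renders the Gradio output component
```
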
diff --git a/spaces/AlexWang/lama/saicinpainting/training/data/__init__.py b/spaces/AlexWang/lama/saicinpainting/training/data/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/sd_text2img_k_diffusion.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/sd_text2img_k_diffusion.py
deleted file mode 100644
index b7fbc46b67cbe88cd82f2f88b4fbcdeb1fac51e0..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/sd_text2img_k_diffusion.py
+++ /dev/null
@@ -1,475 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import importlib
-import warnings
-from typing import Callable, List, Optional, Union
-
-import torch
-from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
-
-from diffusers import DiffusionPipeline, LMSDiscreteScheduler
-from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import is_accelerate_available, logging
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class ModelWrapper:
- def __init__(self, model, alphas_cumprod):
- self.model = model
- self.alphas_cumprod = alphas_cumprod
-
- def apply_model(self, *args, **kwargs):
- if len(args) == 3:
- encoder_hidden_states = args[-1]
- args = args[:2]
- if kwargs.get("cond", None) is not None:
- encoder_hidden_states = kwargs.pop("cond")
- return self.model(*args, encoder_hidden_states=encoder_hidden_states, **kwargs).sample
-
-
-class StableDiffusionPipeline(DiffusionPipeline):
- r"""
- Pipeline for text-to-image generation using Stable Diffusion.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPImageProcessor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
- _optional_components = ["safety_checker", "feature_extractor"]
-
- def __init__(
- self,
- vae,
- text_encoder,
- tokenizer,
- unet,
- scheduler,
- safety_checker,
- feature_extractor,
- ):
- super().__init__()
-
- if safety_checker is None:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- # get correct sigmas from LMS
- scheduler = LMSDiscreteScheduler.from_config(scheduler.config)
- self.register_modules(
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
-
- model = ModelWrapper(unet, scheduler.alphas_cumprod)
- if scheduler.config.prediction_type == "v_prediction":
- self.k_diffusion_model = CompVisVDenoiser(model)
- else:
- self.k_diffusion_model = CompVisDenoiser(model)
-
- def set_sampler(self, scheduler_type: str):
- warnings.warn("The `set_sampler` method is deprecated, please use `set_scheduler` instead.")
- return self.set_scheduler(scheduler_type)
-
- def set_scheduler(self, scheduler_type: str):
- library = importlib.import_module("k_diffusion")
- sampling = getattr(library, "sampling")
- self.sampler = getattr(sampling, scheduler_type)
-
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
- r"""
- Enable sliced attention computation.
-
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
-
- Args:
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
- `attention_head_dim` must be a multiple of `slice_size`.
- """
- if slice_size == "auto":
- # half the attention head size is usually a good trade-off between
- # speed and memory
- slice_size = self.unet.config.attention_head_dim // 2
- self.unet.set_attention_slice(slice_size)
-
- def disable_attention_slicing(self):
- r"""
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
- back to computing attention in one step.
- """
- # set slice_size = `None` to disable `attention slicing`
- self.enable_attention_slicing(None)
-
- def enable_sequential_cpu_offload(self, gpu_id=0):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device(f"cuda:{gpu_id}")
-
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
- if cpu_offloaded_model is not None:
- cpu_offload(cpu_offloaded_model, device)
-
- @property
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
- return self.device
- for module in self.unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt):
- r"""
- Encodes the prompt into text encoder hidden states.
-
- Args:
- prompt (`str` or `list(int)`):
- prompt to be encoded
- device: (`torch.device`):
- torch device
- num_images_per_prompt (`int`):
- number of images that should be generated per prompt
- do_classifier_free_guidance (`bool`):
- whether to use classifier free guidance or not
- negative_prompt (`str` or `List[str]`):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- """
- batch_size = len(prompt) if isinstance(prompt, list) else 1
-
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="pt").input_ids
-
- if not torch.equal(text_input_ids, untruncated_ids):
- removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = text_inputs.attention_mask.to(device)
- else:
- attention_mask = None
-
- text_embeddings = self.text_encoder(
- text_input_ids.to(device),
- attention_mask=attention_mask,
- )
- text_embeddings = text_embeddings[0]
-
- # duplicate text embeddings for each generation per prompt, using mps friendly method
- bs_embed, seq_len, _ = text_embeddings.shape
- text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
- text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
- # get unconditional embeddings for classifier free guidance
- if do_classifier_free_guidance:
- uncond_tokens: List[str]
- if negative_prompt is None:
- uncond_tokens = [""] * batch_size
- elif type(prompt) is not type(negative_prompt):
- raise TypeError(
-                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
- f" {type(prompt)}."
- )
- elif isinstance(negative_prompt, str):
- uncond_tokens = [negative_prompt]
- elif batch_size != len(negative_prompt):
- raise ValueError(
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
- " the batch size of `prompt`."
- )
- else:
- uncond_tokens = negative_prompt
-
- max_length = text_input_ids.shape[-1]
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=max_length,
- truncation=True,
- return_tensors="pt",
- )
-
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
- attention_mask = uncond_input.attention_mask.to(device)
- else:
- attention_mask = None
-
- uncond_embeddings = self.text_encoder(
- uncond_input.input_ids.to(device),
- attention_mask=attention_mask,
- )
- uncond_embeddings = uncond_embeddings[0]
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
- seq_len = uncond_embeddings.shape[1]
- uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
- uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
-
- return text_embeddings
-
- def run_safety_checker(self, image, device, dtype):
- if self.safety_checker is not None:
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
- image, has_nsfw_concept = self.safety_checker(
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
- )
- else:
- has_nsfw_concept = None
- return image, has_nsfw_concept
-
- def decode_latents(self, latents):
- latents = 1 / 0.18215 * latents
- image = self.vae.decode(latents).sample
- image = (image / 2 + 0.5).clamp(0, 1)
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
- return image
-
- def check_inputs(self, prompt, height, width, callback_steps):
- if not isinstance(prompt, str) and not isinstance(prompt, list):
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
- if height % 8 != 0 or width % 8 != 0:
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
- if (callback_steps is None) or (
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
- ):
- raise ValueError(
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
- f" {type(callback_steps)}."
- )
-
- def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
- shape = (batch_size, num_channels_latents, height // 8, width // 8)
- if latents is None:
- if device.type == "mps":
- # randn does not work reproducibly on mps
- latents = torch.randn(shape, generator=generator, device="cpu", dtype=dtype).to(device)
- else:
- latents = torch.randn(shape, generator=generator, device=device, dtype=dtype)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- # scale the initial noise by the standard deviation required by the scheduler
- return latents
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- height: int = 512,
- width: int = 512,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- **kwargs,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide the image generation.
- height (`int`, *optional*, defaults to 512):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to 512):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator`, *optional*):
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
- deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
- output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
-
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is a list with the generated images, and the second element is a
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, according to the `safety_checker`.
- """
-
- # 1. Check inputs. Raise error if not correct
- self.check_inputs(prompt, height, width, callback_steps)
-
- # 2. Define call parameters
- batch_size = 1 if isinstance(prompt, str) else len(prompt)
- device = self._execution_device
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
- # corresponds to doing no classifier free guidance.
- do_classifier_free_guidance = True
- if guidance_scale <= 1.0:
- raise ValueError("has to use guidance_scale")
-
- # 3. Encode input prompt
- text_embeddings = self._encode_prompt(
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
- )
-
- # 4. Prepare timesteps
- self.scheduler.set_timesteps(num_inference_steps, device=text_embeddings.device)
- sigmas = self.scheduler.sigmas
- sigmas = sigmas.to(text_embeddings.dtype)
-
- # 5. Prepare latent variables
- num_channels_latents = self.unet.config.in_channels
- latents = self.prepare_latents(
- batch_size * num_images_per_prompt,
- num_channels_latents,
- height,
- width,
- text_embeddings.dtype,
- device,
- generator,
- latents,
- )
- latents = latents * sigmas[0]
- self.k_diffusion_model.sigmas = self.k_diffusion_model.sigmas.to(latents.device)
- self.k_diffusion_model.log_sigmas = self.k_diffusion_model.log_sigmas.to(latents.device)
-
- def model_fn(x, t):
- latent_model_input = torch.cat([x] * 2)
-
- noise_pred = self.k_diffusion_model(latent_model_input, t, cond=text_embeddings)
-
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
- return noise_pred
-
- latents = self.sampler(model_fn, latents, sigmas)
-
- # 8. Post-processing
- image = self.decode_latents(latents)
-
- # 9. Run safety checker
- image, has_nsfw_concept = self.run_safety_checker(image, device, text_embeddings.dtype)
-
- # 10. Convert to PIL
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image, has_nsfw_concept)
-
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
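
The core of the pipeline above is `model_fn`, which applies classifier-free guidance: the latents are duplicated, denoised once against the unconditional embeddings and once against the text embeddings, and the two predictions are blended with `guidance_scale`. That arithmetic in isolation, with a dummy denoiser standing in for the wrapped k-diffusion model:

```python
import torch

def dummy_denoiser(latent_batch: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # Hypothetical stand-in for self.k_diffusion_model(latents, t, cond=...).
    return torch.randn_like(latent_batch)

def guided_prediction(latents: torch.Tensor, t: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    latent_model_input = torch.cat([latents] * 2)            # [uncond | cond] in one batch
    noise_pred = dummy_denoiser(latent_model_input, t)
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # Push the prediction away from the unconditional direction, scaled by guidance_scale.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

x = torch.randn(1, 4, 64, 64)
print(guided_prediction(x, torch.tensor([10.0]), guidance_scale=7.5).shape)  # torch.Size([1, 4, 64, 64])
```
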
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/text_inpainting.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/text_inpainting.py
deleted file mode 100644
index 99a488788a0de6db78ae7c2c89038565efd29551..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/text_inpainting.py
+++ /dev/null
@@ -1,302 +0,0 @@
-from typing import Callable, List, Optional, Union
-
-import PIL
-import torch
-from transformers import (
- CLIPImageProcessor,
- CLIPSegForImageSegmentation,
- CLIPSegProcessor,
- CLIPTextModel,
- CLIPTokenizer,
-)
-
-from diffusers import DiffusionPipeline
-from diffusers.configuration_utils import FrozenDict
-from diffusers.models import AutoencoderKL, UNet2DConditionModel
-from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
-from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
-from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
-from diffusers.utils import deprecate, is_accelerate_available, logging
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class TextInpainting(DiffusionPipeline):
- r"""
- Pipeline for text based inpainting using Stable Diffusion.
- Uses CLIPSeg to get a mask from the given text, then calls the Inpainting pipeline with the generated mask
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
- Args:
- segmentation_model ([`CLIPSegForImageSegmentation`]):
- CLIPSeg Model to generate mask from the given text. Please refer to the [model card]() for details.
- segmentation_processor ([`CLIPSegProcessor`]):
-            CLIPSeg processor to prepare the image and text inputs for the segmentation model. Please refer to the
- [model card](https://huggingface.co/docs/transformers/model_doc/clipseg) for details.
- vae ([`AutoencoderKL`]):
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
- text_encoder ([`CLIPTextModel`]):
- Frozen text-encoder. Stable Diffusion uses the text portion of
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
- tokenizer (`CLIPTokenizer`):
- Tokenizer of class
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
- scheduler ([`SchedulerMixin`]):
-            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
- safety_checker ([`StableDiffusionSafetyChecker`]):
- Classification module that estimates whether generated images could be considered offensive or harmful.
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
- feature_extractor ([`CLIPImageProcessor`]):
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
- """
-
- def __init__(
- self,
- segmentation_model: CLIPSegForImageSegmentation,
- segmentation_processor: CLIPSegProcessor,
- vae: AutoencoderKL,
- text_encoder: CLIPTextModel,
- tokenizer: CLIPTokenizer,
- unet: UNet2DConditionModel,
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
- safety_checker: StableDiffusionSafetyChecker,
- feature_extractor: CLIPImageProcessor,
- ):
- super().__init__()
-
- if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
- f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
- " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
- " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
- " file"
- )
- deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["steps_offset"] = 1
- scheduler._internal_dict = FrozenDict(new_config)
-
- if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
- deprecation_message = (
- f"The configuration file of this scheduler: {scheduler} has not set the configuration"
- " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
- " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
- " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
- " Hub, it would be very nice if you could open a Pull request for the"
- " `scheduler/scheduler_config.json` file"
- )
- deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
- new_config = dict(scheduler.config)
- new_config["skip_prk_steps"] = True
- scheduler._internal_dict = FrozenDict(new_config)
-
- if safety_checker is None:
- logger.warning(
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
- )
-
- self.register_modules(
- segmentation_model=segmentation_model,
- segmentation_processor=segmentation_processor,
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- scheduler=scheduler,
- safety_checker=safety_checker,
- feature_extractor=feature_extractor,
- )
-
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
- r"""
- Enable sliced attention computation.
-
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
-
- Args:
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
- `attention_head_dim` must be a multiple of `slice_size`.
- """
- if slice_size == "auto":
- # half the attention head size is usually a good trade-off between
- # speed and memory
- slice_size = self.unet.config.attention_head_dim // 2
- self.unet.set_attention_slice(slice_size)
-
- def disable_attention_slicing(self):
- r"""
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
- back to computing attention in one step.
- """
- # set slice_size = `None` to disable `attention slicing`
- self.enable_attention_slicing(None)
-
- def enable_sequential_cpu_offload(self):
- r"""
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
- text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
- """
- if is_accelerate_available():
- from accelerate import cpu_offload
- else:
- raise ImportError("Please install accelerate via `pip install accelerate`")
-
- device = torch.device("cuda")
-
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
- if cpu_offloaded_model is not None:
- cpu_offload(cpu_offloaded_model, device)
-
- @property
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
- def _execution_device(self):
- r"""
- Returns the device on which the pipeline's models will be executed. After calling
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
- hooks.
- """
- if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
- return self.device
- for module in self.unet.modules():
- if (
- hasattr(module, "_hf_hook")
- and hasattr(module._hf_hook, "execution_device")
- and module._hf_hook.execution_device is not None
- ):
- return torch.device(module._hf_hook.execution_device)
- return self.device
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Union[str, List[str]],
- image: Union[torch.FloatTensor, PIL.Image.Image],
- text: str,
- height: int = 512,
- width: int = 512,
- num_inference_steps: int = 50,
- guidance_scale: float = 7.5,
- negative_prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: Optional[int] = 1,
- eta: float = 0.0,
- generator: Optional[torch.Generator] = None,
- latents: Optional[torch.FloatTensor] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
- callback_steps: int = 1,
- **kwargs,
- ):
- r"""
- Function invoked when calling the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide the image generation.
- image (`PIL.Image.Image`):
- `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
- be masked out with `mask_image` and repainted according to `prompt`.
-            text (`str`):
- The text to use to generate the mask.
- height (`int`, *optional*, defaults to 512):
- The height in pixels of the generated image.
- width (`int`, *optional*, defaults to 512):
- The width in pixels of the generated image.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- guidance_scale (`float`, *optional*, defaults to 7.5):
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
- usually at the expense of lower image quality.
- negative_prompt (`str` or `List[str]`, *optional*):
- The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
- if `guidance_scale` is less than `1`).
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- eta (`float`, *optional*, defaults to 0.0):
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
- [`schedulers.DDIMScheduler`], will be ignored for others.
- generator (`torch.Generator`, *optional*):
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
- deterministic.
- latents (`torch.FloatTensor`, *optional*):
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                tensor will be generated by sampling using the supplied random `generator`.
- output_type (`str`, *optional*, defaults to `"pil"`):
-                The output format of the generated image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
- plain tuple.
- callback (`Callable`, *optional*):
- A function that will be called every `callback_steps` steps during inference. The function will be
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
- callback_steps (`int`, *optional*, defaults to 1):
- The frequency at which the `callback` function will be called. If not specified, the callback will be
- called at every step.
-
- Returns:
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
- When returning a tuple, the first element is a list with the generated images, and the second element is a
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
- (nsfw) content, according to the `safety_checker`.
- """
-
- # We use the input text to generate the mask
- inputs = self.segmentation_processor(
- text=[text], images=[image], padding="max_length", return_tensors="pt"
- ).to(self.device)
- outputs = self.segmentation_model(**inputs)
- mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
- mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)
-
- # Run inpainting pipeline with the generated mask
- inpainting_pipeline = StableDiffusionInpaintPipeline(
- vae=self.vae,
- text_encoder=self.text_encoder,
- tokenizer=self.tokenizer,
- unet=self.unet,
- scheduler=self.scheduler,
- safety_checker=self.safety_checker,
- feature_extractor=self.feature_extractor,
- )
- return inpainting_pipeline(
- prompt=prompt,
- image=image,
- mask_image=mask_pil,
- height=height,
- width=width,
- num_inference_steps=num_inference_steps,
- guidance_scale=guidance_scale,
- negative_prompt=negative_prompt,
- num_images_per_prompt=num_images_per_prompt,
- eta=eta,
- generator=generator,
- latents=latents,
- output_type=output_type,
- return_dict=return_dict,
- callback=callback,
- callback_steps=callback_steps,
- )
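
The distinguishing step in the pipeline above is turning a free-text description into an inpainting mask with CLIPSeg before delegating to the standard inpainting pipeline. A sketch of just that mask step (the checkpoint name below is an assumption; the deleted pipeline receives its segmentation model and processor as constructor arguments):

```python
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

# Checkpoint name is an assumption, not something this file pins down.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

def text_to_mask(image: Image.Image, text: str) -> Image.Image:
    inputs = processor(text=[text], images=[image], padding="max_length", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.sigmoid(logits).squeeze().cpu().numpy()    # relevance map in [0, 1]
    mask = Image.fromarray((probs * 255).astype("uint8"))
    return mask.resize(image.size)                           # match the input resolution
```
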
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py
deleted file mode 100644
index 21c8e78cfade2b7b55a91c4badb0a701c311b2e9..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/kandinsky/test_kandinsky_combined.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# coding=utf-8
-# Copyright 2023 HuggingFace Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-
-from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline
-from diffusers.utils import torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
-
-from ..test_pipelines_common import PipelineTesterMixin
-from .test_kandinsky import Dummies
-from .test_kandinsky_img2img import Dummies as Img2ImgDummies
-from .test_kandinsky_inpaint import Dummies as InpaintDummies
-from .test_kandinsky_prior import Dummies as PriorDummies
-
-
-enable_full_determinism()
-
-
-class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyCombinedPipeline
- params = [
- "prompt",
- ]
- batch_params = ["prompt", "negative_prompt"]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
- def get_dummy_components(self):
- dummy = Dummies()
- prior_dummy = PriorDummies()
- components = dummy.get_dummy_components()
-
- components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
- return components
-
- def get_dummy_inputs(self, device, seed=0):
- prior_dummy = PriorDummies()
- inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
- inputs.update(
- {
- "height": 64,
- "width": 64,
- }
- )
- return inputs
-
- def test_kandinsky(self):
- device = "cpu"
-
- components = self.get_dummy_components()
-
- pipe = self.pipeline_class(**components)
- pipe = pipe.to(device)
-
- pipe.set_progress_bar_config(disable=None)
-
- output = pipe(**self.get_dummy_inputs(device))
- image = output.images
-
- image_from_tuple = pipe(
- **self.get_dummy_inputs(device),
- return_dict=False,
- )[0]
-
- image_slice = image[0, -3:, -3:, -1]
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
-
- assert image.shape == (1, 64, 64, 3)
-
- expected_slice = np.array([0.0000, 0.0000, 0.6777, 0.1363, 0.3624, 0.7868, 0.3869, 0.3395, 0.5068])
-
- assert (
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
- assert (
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-
- @require_torch_gpu
- def test_offloads(self):
- pipes = []
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components).to(torch_device)
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_model_cpu_offload()
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_sequential_cpu_offload()
- pipes.append(sd_pipe)
-
- image_slices = []
- for pipe in pipes:
- inputs = self.get_dummy_inputs(torch_device)
- image = pipe(**inputs).images
-
- image_slices.append(image[0, -3:, -3:, -1].flatten())
-
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
-
- def test_inference_batch_single_identical(self):
- super().test_inference_batch_single_identical(expected_max_diff=1e-2)
-
-
-class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyImg2ImgCombinedPipeline
- params = ["prompt", "image"]
- batch_params = ["prompt", "negative_prompt", "image"]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
- def get_dummy_components(self):
- dummy = Img2ImgDummies()
- prior_dummy = PriorDummies()
- components = dummy.get_dummy_components()
-
- components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
- return components
-
- def get_dummy_inputs(self, device, seed=0):
- prior_dummy = PriorDummies()
- dummy = Img2ImgDummies()
- inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
- inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
- inputs.pop("image_embeds")
- inputs.pop("negative_image_embeds")
- return inputs
-
- def test_kandinsky(self):
- device = "cpu"
-
- components = self.get_dummy_components()
-
- pipe = self.pipeline_class(**components)
- pipe = pipe.to(device)
-
- pipe.set_progress_bar_config(disable=None)
-
- output = pipe(**self.get_dummy_inputs(device))
- image = output.images
-
- image_from_tuple = pipe(
- **self.get_dummy_inputs(device),
- return_dict=False,
- )[0]
-
- image_slice = image[0, -3:, -3:, -1]
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
-
- assert image.shape == (1, 64, 64, 3)
-
- expected_slice = np.array([0.4260, 0.3596, 0.4571, 0.3890, 0.4087, 0.5137, 0.4819, 0.4116, 0.5053])
-
- assert (
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
- assert (
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-
- @require_torch_gpu
- def test_offloads(self):
- pipes = []
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components).to(torch_device)
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_model_cpu_offload()
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_sequential_cpu_offload()
- pipes.append(sd_pipe)
-
- image_slices = []
- for pipe in pipes:
- inputs = self.get_dummy_inputs(torch_device)
- image = pipe(**inputs).images
-
- image_slices.append(image[0, -3:, -3:, -1].flatten())
-
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
-
- def test_inference_batch_single_identical(self):
- super().test_inference_batch_single_identical(expected_max_diff=1e-2)
-
-
-class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase):
- pipeline_class = KandinskyInpaintCombinedPipeline
- params = ["prompt", "image", "mask_image"]
- batch_params = ["prompt", "negative_prompt", "image", "mask_image"]
- required_optional_params = [
- "generator",
- "height",
- "width",
- "latents",
- "guidance_scale",
- "negative_prompt",
- "num_inference_steps",
- "return_dict",
- "guidance_scale",
- "num_images_per_prompt",
- "output_type",
- "return_dict",
- ]
- test_xformers_attention = False
-
- def get_dummy_components(self):
- dummy = InpaintDummies()
- prior_dummy = PriorDummies()
- components = dummy.get_dummy_components()
-
- components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()})
- return components
-
- def get_dummy_inputs(self, device, seed=0):
- prior_dummy = PriorDummies()
- dummy = InpaintDummies()
- inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed)
- inputs.update(dummy.get_dummy_inputs(device=device, seed=seed))
- inputs.pop("image_embeds")
- inputs.pop("negative_image_embeds")
- return inputs
-
- def test_kandinsky(self):
- device = "cpu"
-
- components = self.get_dummy_components()
-
- pipe = self.pipeline_class(**components)
- pipe = pipe.to(device)
-
- pipe.set_progress_bar_config(disable=None)
-
- output = pipe(**self.get_dummy_inputs(device))
- image = output.images
-
- image_from_tuple = pipe(
- **self.get_dummy_inputs(device),
- return_dict=False,
- )[0]
-
- image_slice = image[0, -3:, -3:, -1]
- image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
-
- assert image.shape == (1, 64, 64, 3)
-
- expected_slice = np.array([0.0477, 0.0808, 0.2972, 0.2705, 0.3620, 0.6247, 0.4464, 0.2870, 0.3530])
-
- assert (
- np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
- assert (
- np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
- ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
-
- @require_torch_gpu
- def test_offloads(self):
- pipes = []
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components).to(torch_device)
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_model_cpu_offload()
- pipes.append(sd_pipe)
-
- components = self.get_dummy_components()
- sd_pipe = self.pipeline_class(**components)
- sd_pipe.enable_sequential_cpu_offload()
- pipes.append(sd_pipe)
-
- image_slices = []
- for pipe in pipes:
- inputs = self.get_dummy_inputs(torch_device)
- image = pipe(**inputs).images
-
- image_slices.append(image[0, -3:, -3:, -1].flatten())
-
- assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3
- assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3
-
- def test_inference_batch_single_identical(self):
- super().test_inference_batch_single_identical(expected_max_diff=1e-2)
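
All three fast-test classes above share the same check: run the combined pipeline on dummy components, take the bottom-right 3x3 patch of the last channel, and compare it against a hard-coded reference slice within an absolute tolerance. That comparison, pulled out on its own:

```python
import numpy as np

def assert_slice_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 1e-2) -> None:
    # image has shape (batch, H, W, C); compare the bottom-right 3x3 patch of the last channel.
    image_slice = image[0, -3:, -3:, -1].flatten()
    max_diff = np.abs(image_slice - expected_slice).max()
    assert max_diff < atol, f"expected {expected_slice}, got {image_slice} (max diff {max_diff:.4f})"

assert_slice_close(np.zeros((1, 64, 64, 3)), np.zeros(9))   # passes: identical slices
```
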
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_rpn_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_rpn_head.py
deleted file mode 100644
index 2ec0d4fdd3475bfbd2e541a6e8130b1df9ad861a..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_rpn_head.py
+++ /dev/null
@@ -1,171 +0,0 @@
-import copy
-import warnings
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv import ConfigDict
-from mmcv.cnn import normal_init
-from mmcv.ops import nms
-
-from ..builder import HEADS
-from .guided_anchor_head import GuidedAnchorHead
-from .rpn_test_mixin import RPNTestMixin
-
-
-@HEADS.register_module()
-class GARPNHead(RPNTestMixin, GuidedAnchorHead):
- """Guided-Anchor-based RPN head."""
-
- def __init__(self, in_channels, **kwargs):
- super(GARPNHead, self).__init__(1, in_channels, **kwargs)
-
- def _init_layers(self):
- """Initialize layers of the head."""
- self.rpn_conv = nn.Conv2d(
- self.in_channels, self.feat_channels, 3, padding=1)
- super(GARPNHead, self)._init_layers()
-
- def init_weights(self):
- """Initialize weights of the head."""
- normal_init(self.rpn_conv, std=0.01)
- super(GARPNHead, self).init_weights()
-
- def forward_single(self, x):
- """Forward feature of a single scale level."""
-
- x = self.rpn_conv(x)
- x = F.relu(x, inplace=True)
- (cls_score, bbox_pred, shape_pred,
- loc_pred) = super(GARPNHead, self).forward_single(x)
- return cls_score, bbox_pred, shape_pred, loc_pred
-
- def loss(self,
- cls_scores,
- bbox_preds,
- shape_preds,
- loc_preds,
- gt_bboxes,
- img_metas,
- gt_bboxes_ignore=None):
- losses = super(GARPNHead, self).loss(
- cls_scores,
- bbox_preds,
- shape_preds,
- loc_preds,
- gt_bboxes,
- None,
- img_metas,
- gt_bboxes_ignore=gt_bboxes_ignore)
- return dict(
- loss_rpn_cls=losses['loss_cls'],
- loss_rpn_bbox=losses['loss_bbox'],
- loss_anchor_shape=losses['loss_shape'],
- loss_anchor_loc=losses['loss_loc'])
-
- def _get_bboxes_single(self,
- cls_scores,
- bbox_preds,
- mlvl_anchors,
- mlvl_masks,
- img_shape,
- scale_factor,
- cfg,
- rescale=False):
- cfg = self.test_cfg if cfg is None else cfg
-
- cfg = copy.deepcopy(cfg)
-
- # deprecate arguments warning
- if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
- warnings.warn(
- 'In rpn_proposal or test_cfg, '
- 'nms_thr has been moved to a dict named nms as '
- 'iou_threshold, max_num has been renamed as max_per_img, '
- 'name of original arguments and the way to specify '
- 'iou_threshold of NMS will be deprecated.')
- if 'nms' not in cfg:
- cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
- if 'max_num' in cfg:
- if 'max_per_img' in cfg:
- assert cfg.max_num == cfg.max_per_img, f'You ' \
- f'set max_num and max_per_img at the same time, ' \
- f'but get {cfg.max_num} ' \
- f'and {cfg.max_per_img} respectively' \
- 'Please delete max_num which will be deprecated.'
- else:
- cfg.max_per_img = cfg.max_num
- if 'nms_thr' in cfg:
- assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
- f'iou_threshold in nms and ' \
- f'nms_thr at the same time, but get ' \
- f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
- f' respectively. Please delete the ' \
- f'nms_thr which will be deprecated.'
-
- assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \
- 'naive nms.'
-
- mlvl_proposals = []
- for idx in range(len(cls_scores)):
- rpn_cls_score = cls_scores[idx]
- rpn_bbox_pred = bbox_preds[idx]
- anchors = mlvl_anchors[idx]
- mask = mlvl_masks[idx]
- assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
- # if no location is kept, end.
- if mask.sum() == 0:
- continue
- rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
- if self.use_sigmoid_cls:
- rpn_cls_score = rpn_cls_score.reshape(-1)
- scores = rpn_cls_score.sigmoid()
- else:
- rpn_cls_score = rpn_cls_score.reshape(-1, 2)
- # remind that we set FG labels to [0, num_class-1]
- # since mmdet v2.0
- # BG cat_id: num_class
- scores = rpn_cls_score.softmax(dim=1)[:, :-1]
- # filter scores, bbox_pred w.r.t. mask.
- # anchors are filtered in get_anchors() beforehand.
- scores = scores[mask]
- rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
- 4)[mask, :]
- if scores.dim() == 0:
- rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
- anchors = anchors.unsqueeze(0)
- scores = scores.unsqueeze(0)
- # filter anchors, bbox_pred, scores w.r.t. scores
- if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
- _, topk_inds = scores.topk(cfg.nms_pre)
- rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
- anchors = anchors[topk_inds, :]
- scores = scores[topk_inds]
- # get proposals w.r.t. anchors and rpn_bbox_pred
- proposals = self.bbox_coder.decode(
- anchors, rpn_bbox_pred, max_shape=img_shape)
- # filter out too small bboxes
- if cfg.min_bbox_size > 0:
- w = proposals[:, 2] - proposals[:, 0]
- h = proposals[:, 3] - proposals[:, 1]
- valid_inds = torch.nonzero(
- (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
- as_tuple=False).squeeze()
- proposals = proposals[valid_inds, :]
- scores = scores[valid_inds]
- # NMS in current level
- proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)
- proposals = proposals[:cfg.nms_post, :]
- mlvl_proposals.append(proposals)
- proposals = torch.cat(mlvl_proposals, 0)
- if cfg.get('nms_across_levels', False):
- # NMS across multi levels
- proposals, _ = nms(proposals[:, :4], proposals[:, -1],
- cfg.nms.iou_threshold)
- proposals = proposals[:cfg.max_per_img, :]
- else:
- scores = proposals[:, 4]
- num = min(cfg.max_per_img, proposals.shape[0])
- _, topk_inds = scores.topk(num)
- proposals = proposals[topk_inds, :]
- return proposals
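
Per feature level, `_get_bboxes_single` above keeps the `nms_pre` highest-scoring anchors, decodes them into proposals, drops boxes below `min_bbox_size`, and runs NMS before trimming to `nms_post`. A self-contained sketch of that filtering chain, using `torchvision.ops.nms` in place of the mmcv op:

```python
import torch
from torchvision.ops import nms

def filter_proposals(proposals: torch.Tensor, scores: torch.Tensor,
                     min_bbox_size: float = 0.0, nms_pre: int = 1000,
                     iou_threshold: float = 0.7, nms_post: int = 1000) -> torch.Tensor:
    # Keep only the top-scoring candidates before the (comparatively expensive) NMS step.
    if 0 < nms_pre < scores.numel():
        scores, topk_inds = scores.topk(nms_pre)
        proposals = proposals[topk_inds]
    # Drop degenerate boxes, mirroring the min_bbox_size filter above.
    w = proposals[:, 2] - proposals[:, 0]
    h = proposals[:, 3] - proposals[:, 1]
    keep = (w >= min_bbox_size) & (h >= min_bbox_size)
    proposals, scores = proposals[keep], scores[keep]
    # Greedy NMS, then cap the number of surviving proposals for this level.
    keep = nms(proposals, scores, iou_threshold)[:nms_post]
    return torch.cat([proposals[keep], scores[keep, None]], dim=1)   # (N, 5): x1, y1, x2, y2, score

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
print(filter_proposals(boxes, torch.tensor([0.9, 0.8, 0.7]), iou_threshold=0.5))
```
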
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py
deleted file mode 100644
index 3f1f5b44168768dfda3947393a63a6cf9cf50b41..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/default_constructor.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from .builder import RUNNER_BUILDERS, RUNNERS
-
-
-@RUNNER_BUILDERS.register_module()
-class DefaultRunnerConstructor:
- """Default constructor for runners.
-
-    Customize an existing `Runner` like `EpochBasedRunner` through a `RunnerConstructor`.
-    For example, we can inject new properties and functions into `Runner`.
-
- Example:
- >>> from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, build_runner
- >>> # Define a new RunnerReconstructor
- >>> @RUNNER_BUILDERS.register_module()
- >>> class MyRunnerConstructor:
- ... def __init__(self, runner_cfg, default_args=None):
- ... if not isinstance(runner_cfg, dict):
- ... raise TypeError('runner_cfg should be a dict',
- ... f'but got {type(runner_cfg)}')
- ... self.runner_cfg = runner_cfg
- ... self.default_args = default_args
- ...
- ... def __call__(self):
- ... runner = RUNNERS.build(self.runner_cfg,
- ... default_args=self.default_args)
- ... # Add new properties for existing runner
- ... runner.my_name = 'my_runner'
- ... runner.my_function = lambda self: print(self.my_name)
- ... ...
- >>> # build your runner
- >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
- ... constructor='MyRunnerConstructor')
- >>> runner = build_runner(runner_cfg)
- """
-
- def __init__(self, runner_cfg, default_args=None):
- if not isinstance(runner_cfg, dict):
-            raise TypeError('runner_cfg should be a dict, '
-                            f'but got {type(runner_cfg)}')
- self.runner_cfg = runner_cfg
- self.default_args = default_args
-
- def __call__(self):
- return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
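For context on how the removed constructor is used: `build_runner` looks up the constructor named in `runner_cfg` (falling back to `DefaultRunnerConstructor`) and calls it to produce the runner. The sketch below only mirrors the docstring example above and is a hedged illustration: the import path is the vendored one used in this repo, `LoggingRunnerConstructor` is a made-up name, and a real call would also need `default_args` (model, optimizer, work_dir, logger, ...), which the docstring likewise elides.

```python
# Sketch based on the docstring of the deleted DefaultRunnerConstructor.
from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, RUNNERS, build_runner


@RUNNER_BUILDERS.register_module()
class LoggingRunnerConstructor:  # hypothetical custom constructor
    def __init__(self, runner_cfg, default_args=None):
        if not isinstance(runner_cfg, dict):
            raise TypeError(f'runner_cfg should be a dict, but got {type(runner_cfg)}')
        self.runner_cfg = runner_cfg
        self.default_args = default_args

    def __call__(self):
        runner = RUNNERS.build(self.runner_cfg, default_args=self.default_args)
        runner.my_name = 'logging_runner'  # inject an extra property, as in the docstring
        return runner


runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
                  constructor='LoggingRunnerConstructor')
# In practice, also pass default_args=dict(model=..., optimizer=..., work_dir=..., logger=...).
runner = build_runner(runner_cfg)
```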
diff --git a/spaces/Artificio/AdversarialArt/README.md b/spaces/Artificio/AdversarialArt/README.md
deleted file mode 100644
index 450f7e62b14fd833017a0c0c6838b93c643322f5..0000000000000000000000000000000000000000
--- a/spaces/Artificio/AdversarialArt/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: AdversarialArt
-emoji: 🏢
-colorFrom: blue
-colorTo: green
-sdk: gradio
-sdk_version: 3.1.6
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Artrajz/vits-simple-api/bert_vits2/text/tone_sandhi.py b/spaces/Artrajz/vits-simple-api/bert_vits2/text/tone_sandhi.py
deleted file mode 100644
index 6a6e4c3e64f1a9e8b9da73fc6fbebf8a33e5602d..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/bert_vits2/text/tone_sandhi.py
+++ /dev/null
@@ -1,769 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List
-from typing import Tuple
-
-import jieba
-from pypinyin import lazy_pinyin
-from pypinyin import Style
-
-
-class ToneSandhi:
- def __init__(self):
- self.must_neural_tone_words = {
- "麻烦",
- "麻利",
- "鸳鸯",
- "高粱",
- "骨头",
- "骆驼",
- "马虎",
- "首饰",
- "馒头",
- "馄饨",
- "风筝",
- "难为",
- "队伍",
- "阔气",
- "闺女",
- "门道",
- "锄头",
- "铺盖",
- "铃铛",
- "铁匠",
- "钥匙",
- "里脊",
- "里头",
- "部分",
- "那么",
- "道士",
- "造化",
- "迷糊",
- "连累",
- "这么",
- "这个",
- "运气",
- "过去",
- "软和",
- "转悠",
- "踏实",
- "跳蚤",
- "跟头",
- "趔趄",
- "财主",
- "豆腐",
- "讲究",
- "记性",
- "记号",
- "认识",
- "规矩",
- "见识",
- "裁缝",
- "补丁",
- "衣裳",
- "衣服",
- "衙门",
- "街坊",
- "行李",
- "行当",
- "蛤蟆",
- "蘑菇",
- "薄荷",
- "葫芦",
- "葡萄",
- "萝卜",
- "荸荠",
- "苗条",
- "苗头",
- "苍蝇",
- "芝麻",
- "舒服",
- "舒坦",
- "舌头",
- "自在",
- "膏药",
- "脾气",
- "脑袋",
- "脊梁",
- "能耐",
- "胳膊",
- "胭脂",
- "胡萝",
- "胡琴",
- "胡同",
- "聪明",
- "耽误",
- "耽搁",
- "耷拉",
- "耳朵",
- "老爷",
- "老实",
- "老婆",
- "老头",
- "老太",
- "翻腾",
- "罗嗦",
- "罐头",
- "编辑",
- "结实",
- "红火",
- "累赘",
- "糨糊",
- "糊涂",
- "精神",
- "粮食",
- "簸箕",
- "篱笆",
- "算计",
- "算盘",
- "答应",
- "笤帚",
- "笑语",
- "笑话",
- "窟窿",
- "窝囊",
- "窗户",
- "稳当",
- "稀罕",
- "称呼",
- "秧歌",
- "秀气",
- "秀才",
- "福气",
- "祖宗",
- "砚台",
- "码头",
- "石榴",
- "石头",
- "石匠",
- "知识",
- "眼睛",
- "眯缝",
- "眨巴",
- "眉毛",
- "相声",
- "盘算",
- "白净",
- "痢疾",
- "痛快",
- "疟疾",
- "疙瘩",
- "疏忽",
- "畜生",
- "生意",
- "甘蔗",
- "琵琶",
- "琢磨",
- "琉璃",
- "玻璃",
- "玫瑰",
- "玄乎",
- "狐狸",
- "状元",
- "特务",
- "牲口",
- "牙碜",
- "牌楼",
- "爽快",
- "爱人",
- "热闹",
- "烧饼",
- "烟筒",
- "烂糊",
- "点心",
- "炊帚",
- "灯笼",
- "火候",
- "漂亮",
- "滑溜",
- "溜达",
- "温和",
- "清楚",
- "消息",
- "浪头",
- "活泼",
- "比方",
- "正经",
- "欺负",
- "模糊",
- "槟榔",
- "棺材",
- "棒槌",
- "棉花",
- "核桃",
- "栅栏",
- "柴火",
- "架势",
- "枕头",
- "枇杷",
- "机灵",
- "本事",
- "木头",
- "木匠",
- "朋友",
- "月饼",
- "月亮",
- "暖和",
- "明白",
- "时候",
- "新鲜",
- "故事",
- "收拾",
- "收成",
- "提防",
- "挖苦",
- "挑剔",
- "指甲",
- "指头",
- "拾掇",
- "拳头",
- "拨弄",
- "招牌",
- "招呼",
- "抬举",
- "护士",
- "折腾",
- "扫帚",
- "打量",
- "打算",
- "打点",
- "打扮",
- "打听",
- "打发",
- "扎实",
- "扁担",
- "戒指",
- "懒得",
- "意识",
- "意思",
- "情形",
- "悟性",
- "怪物",
- "思量",
- "怎么",
- "念头",
- "念叨",
- "快活",
- "忙活",
- "志气",
- "心思",
- "得罪",
- "张罗",
- "弟兄",
- "开通",
- "应酬",
- "庄稼",
- "干事",
- "帮手",
- "帐篷",
- "希罕",
- "师父",
- "师傅",
- "巴结",
- "巴掌",
- "差事",
- "工夫",
- "岁数",
- "屁股",
- "尾巴",
- "少爷",
- "小气",
- "小伙",
- "将就",
- "对头",
- "对付",
- "寡妇",
- "家伙",
- "客气",
- "实在",
- "官司",
- "学问",
- "学生",
- "字号",
- "嫁妆",
- "媳妇",
- "媒人",
- "婆家",
- "娘家",
- "委屈",
- "姑娘",
- "姐夫",
- "妯娌",
- "妥当",
- "妖精",
- "奴才",
- "女婿",
- "头发",
- "太阳",
- "大爷",
- "大方",
- "大意",
- "大夫",
- "多少",
- "多么",
- "外甥",
- "壮实",
- "地道",
- "地方",
- "在乎",
- "困难",
- "嘴巴",
- "嘱咐",
- "嘟囔",
- "嘀咕",
- "喜欢",
- "喇嘛",
- "喇叭",
- "商量",
- "唾沫",
- "哑巴",
- "哈欠",
- "哆嗦",
- "咳嗽",
- "和尚",
- "告诉",
- "告示",
- "含糊",
- "吓唬",
- "后头",
- "名字",
- "名堂",
- "合同",
- "吆喝",
- "叫唤",
- "口袋",
- "厚道",
- "厉害",
- "千斤",
- "包袱",
- "包涵",
- "匀称",
- "勤快",
- "动静",
- "动弹",
- "功夫",
- "力气",
- "前头",
- "刺猬",
- "刺激",
- "别扭",
- "利落",
- "利索",
- "利害",
- "分析",
- "出息",
- "凑合",
- "凉快",
- "冷战",
- "冤枉",
- "冒失",
- "养活",
- "关系",
- "先生",
- "兄弟",
- "便宜",
- "使唤",
- "佩服",
- "作坊",
- "体面",
- "位置",
- "似的",
- "伙计",
- "休息",
- "什么",
- "人家",
- "亲戚",
- "亲家",
- "交情",
- "云彩",
- "事情",
- "买卖",
- "主意",
- "丫头",
- "丧气",
- "两口",
- "东西",
- "东家",
- "世故",
- "不由",
- "不在",
- "下水",
- "下巴",
- "上头",
- "上司",
- "丈夫",
- "丈人",
- "一辈",
- "那个",
- "菩萨",
- "父亲",
- "母亲",
- "咕噜",
- "邋遢",
- "费用",
- "冤家",
- "甜头",
- "介绍",
- "荒唐",
- "大人",
- "泥鳅",
- "幸福",
- "熟悉",
- "计划",
- "扑腾",
- "蜡烛",
- "姥爷",
- "照顾",
- "喉咙",
- "吉他",
- "弄堂",
- "蚂蚱",
- "凤凰",
- "拖沓",
- "寒碜",
- "糟蹋",
- "倒腾",
- "报复",
- "逻辑",
- "盘缠",
- "喽啰",
- "牢骚",
- "咖喱",
- "扫把",
- "惦记",
- }
- self.must_not_neural_tone_words = {
- "男子",
- "女子",
- "分子",
- "原子",
- "量子",
- "莲子",
- "石子",
- "瓜子",
- "电子",
- "人人",
- "虎虎",
- }
- self.punc = ":,;。?!“”‘’':,;.?!"
-
- # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041
- # e.g.
- # word: "家里"
- # pos: "s"
- # finals: ['ia1', 'i3']
- def _neural_sandhi(self, word: str, pos: str, finals: List[str]) -> List[str]:
- # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺
- for j, item in enumerate(word):
- if (
- j - 1 >= 0
- and item == word[j - 1]
- and pos[0] in {"n", "v", "a"}
- and word not in self.must_not_neural_tone_words
- ):
- finals[j] = finals[j][:-1] + "5"
- ge_idx = word.find("个")
- if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
- finals[-1] = finals[-1][:-1] + "5"
- elif len(word) >= 1 and word[-1] in "的地得":
- finals[-1] = finals[-1][:-1] + "5"
- # e.g. 走了, 看着, 去过
- # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
- # finals[-1] = finals[-1][:-1] + "5"
- elif (
- len(word) > 1
- and word[-1] in "们子"
- and pos in {"r", "n"}
- and word not in self.must_not_neural_tone_words
- ):
- finals[-1] = finals[-1][:-1] + "5"
- # e.g. 桌上, 地下, 家里
- elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
- finals[-1] = finals[-1][:-1] + "5"
- # e.g. 上来, 下去
- elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
- finals[-1] = finals[-1][:-1] + "5"
-        # "个" used as a measure word (classifier)
- elif (
- ge_idx >= 1
- and (word[ge_idx - 1].isnumeric() or word[ge_idx - 1] in "几有两半多各整每做是")
- ) or word == "个":
- finals[ge_idx] = finals[ge_idx][:-1] + "5"
- else:
- if (
- word in self.must_neural_tone_words
- or word[-2:] in self.must_neural_tone_words
- ):
- finals[-1] = finals[-1][:-1] + "5"
-
- word_list = self._split_word(word)
- finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
- for i, word in enumerate(word_list):
- # conventional neural in Chinese
- if (
- word in self.must_neural_tone_words
- or word[-2:] in self.must_neural_tone_words
- ):
- finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
- finals = sum(finals_list, [])
- return finals
-
- def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
- # e.g. 看不懂
- if len(word) == 3 and word[1] == "不":
- finals[1] = finals[1][:-1] + "5"
- else:
- for i, char in enumerate(word):
- # "不" before tone4 should be bu2, e.g. 不怕
- if char == "不" and i + 1 < len(word) and finals[i + 1][-1] == "4":
- finals[i] = finals[i][:-1] + "2"
- return finals
-
- def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
- # "一" in number sequences, e.g. 一零零, 二一零
- if word.find("一") != -1 and all(
- [item.isnumeric() for item in word if item != "一"]
- ):
- return finals
- # "一" between reduplication words should be yi5, e.g. 看一看
- elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
- finals[1] = finals[1][:-1] + "5"
- # when "一" is ordinal word, it should be yi1
- elif word.startswith("第一"):
- finals[1] = finals[1][:-1] + "1"
- else:
- for i, char in enumerate(word):
- if char == "一" and i + 1 < len(word):
- # "一" before tone4 should be yi2, e.g. 一段
- if finals[i + 1][-1] == "4":
- finals[i] = finals[i][:-1] + "2"
- # "一" before non-tone4 should be yi4, e.g. 一天
- else:
-                        # if "一" is followed by punctuation, it is still read with tone 1
- if word[i + 1] not in self.punc:
- finals[i] = finals[i][:-1] + "4"
- return finals
-
- def _split_word(self, word: str) -> List[str]:
- word_list = jieba.cut_for_search(word)
- word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
- first_subword = word_list[0]
- first_begin_idx = word.find(first_subword)
- if first_begin_idx == 0:
- second_subword = word[len(first_subword) :]
- new_word_list = [first_subword, second_subword]
- else:
- second_subword = word[: -len(first_subword)]
- new_word_list = [second_subword, first_subword]
- return new_word_list
-
- def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
- if len(word) == 2 and self._all_tone_three(finals):
- finals[0] = finals[0][:-1] + "2"
- elif len(word) == 3:
- word_list = self._split_word(word)
- if self._all_tone_three(finals):
- # disyllabic + monosyllabic, e.g. 蒙古/包
- if len(word_list[0]) == 2:
- finals[0] = finals[0][:-1] + "2"
- finals[1] = finals[1][:-1] + "2"
- # monosyllabic + disyllabic, e.g. 纸/老虎
- elif len(word_list[0]) == 1:
- finals[1] = finals[1][:-1] + "2"
- else:
- finals_list = [finals[: len(word_list[0])], finals[len(word_list[0]) :]]
- if len(finals_list) == 2:
- for i, sub in enumerate(finals_list):
- # e.g. 所有/人
- if self._all_tone_three(sub) and len(sub) == 2:
- finals_list[i][0] = finals_list[i][0][:-1] + "2"
- # e.g. 好/喜欢
- elif (
- i == 1
- and not self._all_tone_three(sub)
- and finals_list[i][0][-1] == "3"
- and finals_list[0][-1][-1] == "3"
- ):
- finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
- finals = sum(finals_list, [])
-        # split the idiom into two words whose length is 2
- elif len(word) == 4:
- finals_list = [finals[:2], finals[2:]]
- finals = []
- for sub in finals_list:
- if self._all_tone_three(sub):
- sub[0] = sub[0][:-1] + "2"
- finals += sub
-
- return finals
-
- def _all_tone_three(self, finals: List[str]) -> bool:
- return all(x[-1] == "3" for x in finals)
-
- # merge "不" and the word behind it
-    # if not merged, "不" sometimes appears alone according to jieba, which may cause sandhi errors
- def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- last_word = ""
- for word, pos in seg:
- if last_word == "不":
- word = last_word + word
- if word != "不":
- new_seg.append((word, pos))
- last_word = word[:]
- if last_word == "不":
- new_seg.append((last_word, "d"))
- last_word = ""
- return new_seg
-
-    # function 1: merge "一" and the reduplication words on its left and right, e.g. "听","一","听" ->"听一听"
-    # function 2: merge a single "一" and the word behind it
-    # if not merged, "一" sometimes appears alone according to jieba, which may cause sandhi errors
- # e.g.
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
- # output seg: [['听一听', 'v']]
- def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- # function 1
- for i, (word, pos) in enumerate(seg):
- if (
- i - 1 >= 0
- and word == "一"
- and i + 1 < len(seg)
- and seg[i - 1][0] == seg[i + 1][0]
- and seg[i - 1][1] == "v"
- ):
- new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
- else:
- if (
- i - 2 >= 0
- and seg[i - 1][0] == "一"
- and seg[i - 2][0] == word
- and pos == "v"
- ):
- continue
- else:
- new_seg.append([word, pos])
- seg = new_seg
- new_seg = []
- # function 2
- for i, (word, pos) in enumerate(seg):
- if new_seg and new_seg[-1][0] == "一":
- new_seg[-1][0] = new_seg[-1][0] + word
- else:
- new_seg.append([word, pos])
- return new_seg
-
- # the first and the second words are all_tone_three
- def _merge_continuous_three_tones(
- self, seg: List[Tuple[str, str]]
- ) -> List[Tuple[str, str]]:
- new_seg = []
- sub_finals_list = [
- lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for (word, pos) in seg
- ]
- assert len(sub_finals_list) == len(seg)
- merge_last = [False] * len(seg)
- for i, (word, pos) in enumerate(seg):
- if (
- i - 1 >= 0
- and self._all_tone_three(sub_finals_list[i - 1])
- and self._all_tone_three(sub_finals_list[i])
- and not merge_last[i - 1]
- ):
-                # if the previous word is a reduplication, do not merge, because reduplications need _neural_sandhi
- if (
- not self._is_reduplication(seg[i - 1][0])
- and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
- ):
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- merge_last[i] = True
- else:
- new_seg.append([word, pos])
- else:
- new_seg.append([word, pos])
-
- return new_seg
-
- def _is_reduplication(self, word: str) -> bool:
- return len(word) == 2 and word[0] == word[1]
-
-    # the last char of the first word and the first char of the second word are tone three
- def _merge_continuous_three_tones_2(
- self, seg: List[Tuple[str, str]]
- ) -> List[Tuple[str, str]]:
- new_seg = []
- sub_finals_list = [
- lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for (word, pos) in seg
- ]
- assert len(sub_finals_list) == len(seg)
- merge_last = [False] * len(seg)
- for i, (word, pos) in enumerate(seg):
- if (
- i - 1 >= 0
- and sub_finals_list[i - 1][-1][-1] == "3"
- and sub_finals_list[i][0][-1] == "3"
- and not merge_last[i - 1]
- ):
-                # if the previous word is a reduplication, do not merge, because reduplications need _neural_sandhi
- if (
- not self._is_reduplication(seg[i - 1][0])
- and len(seg[i - 1][0]) + len(seg[i][0]) <= 3
- ):
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- merge_last[i] = True
- else:
- new_seg.append([word, pos])
- else:
- new_seg.append([word, pos])
- return new_seg
-
- def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- for i, (word, pos) in enumerate(seg):
- if i - 1 >= 0 and word == "儿" and seg[i - 1][0] != "#":
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- else:
- new_seg.append([word, pos])
- return new_seg
-
- def _merge_reduplication(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- for i, (word, pos) in enumerate(seg):
- if new_seg and word == new_seg[-1][0]:
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- else:
- new_seg.append([word, pos])
- return new_seg
-
- def pre_merge_for_modify(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- seg = self._merge_bu(seg)
- try:
- seg = self._merge_yi(seg)
- except:
- print("_merge_yi failed")
- seg = self._merge_reduplication(seg)
- seg = self._merge_continuous_three_tones(seg)
- seg = self._merge_continuous_three_tones_2(seg)
- seg = self._merge_er(seg)
- return seg
-
- def modified_tone(self, word: str, pos: str, finals: List[str]) -> List[str]:
- finals = self._bu_sandhi(word, finals)
- finals = self._yi_sandhi(word, finals)
- finals = self._neural_sandhi(word, pos, finals)
- finals = self._three_sandhi(word, finals)
- return finals
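The class removed above is meant to be fed a jieba part-of-speech segmentation and pypinyin finals, as its imports and method signatures suggest. Below is a small hedged sketch of that flow: segment, pre-merge, then apply the sandhi rules word by word. The `bert_vits2.text.tone_sandhi` import path is simply the location of the deleted file, and the example sentence is arbitrary.

```python
# Sketch: driving the deleted ToneSandhi class with jieba + pypinyin.
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style

from bert_vits2.text.tone_sandhi import ToneSandhi  # path of the deleted module

sandhi = ToneSandhi()
seg = [(pair.word, pair.flag) for pair in psg.cut("你好,我想买一个苹果")]
seg = sandhi.pre_merge_for_modify(seg)  # merge 不 / 一 / reduplications first

for word, pos in seg:
    # FINALS_TONE3 yields finals with a trailing tone digit, e.g. ["ao3"]
    finals = lazy_pinyin(word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
    finals = sandhi.modified_tone(word, pos, finals)
    print(word, pos, finals)
```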
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/filesize.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/filesize.py
deleted file mode 100644
index 99f118e20103174993b865cfb43ac6b6e00296a4..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/filesize.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# coding: utf-8
-"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
-
-The functions declared in this module should cover the different
-use cases needed to generate a string representation of a file size
-using several different units. Since there are many standards regarding
-file size units, three different functions have been implemented.
-
-See Also:
-    * `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
-
-"""
-
-__all__ = ["decimal"]
-
-from typing import Iterable, List, Optional, Tuple
-
-
-def _to_str(
- size: int,
- suffixes: Iterable[str],
- base: int,
- *,
- precision: Optional[int] = 1,
- separator: Optional[str] = " ",
-) -> str:
- if size == 1:
- return "1 byte"
- elif size < base:
- return "{:,} bytes".format(size)
-
- for i, suffix in enumerate(suffixes, 2): # noqa: B007
- unit = base**i
- if size < unit:
- break
- return "{:,.{precision}f}{separator}{}".format(
- (base * size / unit),
- suffix,
- precision=precision,
- separator=separator,
- )
-
-
-def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
- """Pick a suffix and base for the given size."""
- for i, suffix in enumerate(suffixes):
- unit = base**i
- if size < unit * base:
- break
- return unit, suffix
-
-
-def decimal(
- size: int,
- *,
- precision: Optional[int] = 1,
- separator: Optional[str] = " ",
-) -> str:
- """Convert a filesize in to a string (powers of 1000, SI prefixes).
-
- In this convention, ``1000 B = 1 kB``.
-
- This is typically the format used to advertise the storage
- capacity of USB flash drives and the like (*256 MB* meaning
- actually a storage capacity of more than *256 000 000 B*),
- or used by **Mac OS X** since v10.6 to report file sizes.
-
- Arguments:
-        size (int): A file size.
-        precision (int): The number of decimal places to include (default = 1).
-        separator (str): The string to separate the value from the units (default = " ").
-
- Returns:
-        `str`: A string containing an abbreviated file size and units.
-
- Example:
- >>> filesize.decimal(30000)
- '30.0 kB'
- >>> filesize.decimal(30000, precision=2, separator="")
- '30.00kB'
-
- """
- return _to_str(
- size,
- ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
- 1000,
- precision=precision,
- separator=separator,
- )
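As a quick sanity check of what the removed module does, here is a short usage sketch of its only public function; the vendored import path is the one this file lived under, and the expected outputs in the comments come from the docstring examples above.

```python
from pip._vendor.rich.filesize import decimal

print(decimal(1))                                 # '1 byte'  (special-cased in _to_str)
print(decimal(999))                               # '999 bytes'  (below the base of 1000)
print(decimal(30000))                             # '30.0 kB'
print(decimal(30000, precision=2, separator=""))  # '30.00kB'
```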
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py
deleted file mode 100644
index 40844ddeb8d47ff58a6af49ab35bad84e14f5721..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from ..common.optim import SGD as optimizer
-from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
-from ..common.data.coco import dataloader
-from ..common.models.mask_rcnn_fpn import model
-from ..common.train import train
-
-model.backbone.bottom_up.freeze_at = 2
-train.init_checkpoint = "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
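The removed file is a detectron2 "lazy config": plain Python that assembles `optimizer`, `lr_multiplier`, `dataloader`, `model`, and `train` objects and then overrides a couple of fields. A hedged sketch of how such a config is typically consumed is shown below; `LazyConfig.load` and `instantiate` are detectron2's utilities for this config style, and the path string is just the deleted file's location, shown for illustration.

```python
# Sketch: loading and instantiating a detectron2 lazy config like the one above.
from detectron2.config import LazyConfig, instantiate

cfg = LazyConfig.load("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.py")
cfg.model.backbone.bottom_up.freeze_at = 2  # the same override the config applies
model = instantiate(cfg.model)              # build the actual nn.Module
print(type(model).__name__)
```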
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py
deleted file mode 100644
index da9b324f1582e31d1a16d2fe462ac2989bea56ea..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/torchscript_patch.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import os
-import sys
-import tempfile
-from contextlib import ExitStack, contextmanager
-from copy import deepcopy
-from unittest import mock
-import torch
-from torch import nn
-
-# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
-import detectron2 # noqa F401
-from detectron2.structures import Boxes, Instances
-from detectron2.utils.env import _import_file
-
-_counter = 0
-
-
-def _clear_jit_cache():
- from torch.jit._recursive import concrete_type_store
- from torch.jit._state import _jit_caching_layer
-
- concrete_type_store.type_store.clear() # for modules
- _jit_caching_layer.clear() # for free functions
-
-
-def _add_instances_conversion_methods(newInstances):
- """
- Add from_instances methods to the scripted Instances class.
- """
- cls_name = newInstances.__name__
-
- @torch.jit.unused
- def from_instances(instances: Instances):
- """
- Create scripted Instances from original Instances
- """
- fields = instances.get_fields()
- image_size = instances.image_size
- ret = newInstances(image_size)
- for name, val in fields.items():
- assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
- setattr(ret, name, deepcopy(val))
- return ret
-
- newInstances.from_instances = from_instances
-
-
-@contextmanager
-def patch_instances(fields):
- """
- A contextmanager, under which the Instances class in detectron2 is replaced
- by a statically-typed scriptable class, defined by `fields`.
- See more in `scripting_with_instances`.
- """
-
- with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
- mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
- ) as f:
- try:
- # Objects that use Instances should not reuse previously-compiled
- # results in cache, because `Instances` could be a new class each time.
- _clear_jit_cache()
-
- cls_name, s = _gen_instance_module(fields)
- f.write(s)
- f.flush()
- f.close()
-
- module = _import(f.name)
- new_instances = getattr(module, cls_name)
- _ = torch.jit.script(new_instances)
- # let torchscript think Instances was scripted already
- Instances.__torch_script_class__ = True
- # let torchscript find new_instances when looking for the jit type of Instances
- Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
-
- _add_instances_conversion_methods(new_instances)
- yield new_instances
- finally:
- try:
- del Instances.__torch_script_class__
- del Instances._jit_override_qualname
- except AttributeError:
- pass
- sys.modules.pop(module.__name__)
-
-
-def _gen_instance_class(fields):
- """
- Args:
- fields (dict[name: type])
- """
-
- class _FieldType:
- def __init__(self, name, type_):
- assert isinstance(name, str), f"Field name must be str, got {name}"
- self.name = name
- self.type_ = type_
- self.annotation = f"{type_.__module__}.{type_.__name__}"
-
- fields = [_FieldType(k, v) for k, v in fields.items()]
-
- def indent(level, s):
- return " " * 4 * level + s
-
- lines = []
-
- global _counter
- _counter += 1
-
- cls_name = "ScriptedInstances{}".format(_counter)
-
- field_names = tuple(x.name for x in fields)
- extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
- lines.append(
- f"""
-class {cls_name}:
- def __init__(self, image_size: Tuple[int, int], {extra_args}):
- self.image_size = image_size
- self._field_names = {field_names}
-"""
- )
-
- for f in fields:
- lines.append(
- indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
- )
-
- for f in fields:
- lines.append(
- f"""
- @property
- def {f.name}(self) -> {f.annotation}:
- # has to use a local for type refinement
- # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
- t = self._{f.name}
- assert t is not None, "{f.name} is None and cannot be accessed!"
- return t
-
- @{f.name}.setter
- def {f.name}(self, value: {f.annotation}) -> None:
- self._{f.name} = value
-"""
- )
-
- # support method `__len__`
- lines.append(
- """
- def __len__(self) -> int:
-"""
- )
- for f in fields:
- lines.append(
- f"""
- t = self._{f.name}
- if t is not None:
- return len(t)
-"""
- )
- lines.append(
- """
- raise NotImplementedError("Empty Instances does not support __len__!")
-"""
- )
-
- # support method `has`
- lines.append(
- """
- def has(self, name: str) -> bool:
-"""
- )
- for f in fields:
- lines.append(
- f"""
- if name == "{f.name}":
- return self._{f.name} is not None
-"""
- )
- lines.append(
- """
- return False
-"""
- )
-
- # support method `to`
- none_args = ", None" * len(fields)
- lines.append(
- f"""
- def to(self, device: torch.device) -> "{cls_name}":
- ret = {cls_name}(self.image_size{none_args})
-"""
- )
- for f in fields:
- if hasattr(f.type_, "to"):
- lines.append(
- f"""
- t = self._{f.name}
- if t is not None:
- ret._{f.name} = t.to(device)
-"""
- )
- else:
- # For now, ignore fields that cannot be moved to devices.
- # Maybe can support other tensor-like classes (e.g. __torch_function__)
- pass
- lines.append(
- """
- return ret
-"""
- )
-
- # support method `getitem`
- none_args = ", None" * len(fields)
- lines.append(
- f"""
- def __getitem__(self, item) -> "{cls_name}":
- ret = {cls_name}(self.image_size{none_args})
-"""
- )
- for f in fields:
- lines.append(
- f"""
- t = self._{f.name}
- if t is not None:
- ret._{f.name} = t[item]
-"""
- )
- lines.append(
- """
- return ret
-"""
- )
-
- # support method `cat`
- # this version does not contain checks that all instances have same size and fields
- none_args = ", None" * len(fields)
- lines.append(
- f"""
- def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
- ret = {cls_name}(self.image_size{none_args})
-"""
- )
- for f in fields:
- lines.append(
- f"""
- t = self._{f.name}
- if t is not None:
- values: List[{f.annotation}] = [x.{f.name} for x in instances]
- if torch.jit.isinstance(t, torch.Tensor):
- ret._{f.name} = torch.cat(values, dim=0)
- else:
- ret._{f.name} = t.cat(values)
-"""
- )
- lines.append(
- """
- return ret"""
- )
-
- # support method `get_fields()`
- lines.append(
- """
- def get_fields(self) -> Dict[str, Tensor]:
- ret = {}
- """
- )
- for f in fields:
- if f.type_ == Boxes:
- stmt = "t.tensor"
- elif f.type_ == torch.Tensor:
- stmt = "t"
- else:
- stmt = f'assert False, "unsupported type {str(f.type_)}"'
- lines.append(
- f"""
- t = self._{f.name}
- if t is not None:
- ret["{f.name}"] = {stmt}
- """
- )
- lines.append(
- """
- return ret"""
- )
- return cls_name, os.linesep.join(lines)
-
-
-def _gen_instance_module(fields):
- # TODO: find a more automatic way to enable import of other classes
- s = """
-from copy import deepcopy
-import torch
-from torch import Tensor
-import typing
-from typing import *
-
-import detectron2
-from detectron2.structures import Boxes, Instances
-
-"""
-
- cls_name, cls_def = _gen_instance_class(fields)
- s += cls_def
- return cls_name, s
-
-
-def _import(path):
- return _import_file(
- "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
- )
-
-
-@contextmanager
-def patch_builtin_len(modules=()):
- """
- Patch the builtin len() function of a few detectron2 modules
- to use __len__ instead, because __len__ does not convert values to
- integers and therefore is friendly to tracing.
-
- Args:
-        modules (list[str]): names of extra modules to patch len(), in
- addition to those in detectron2.
- """
-
- def _new_len(obj):
- return obj.__len__()
-
- with ExitStack() as stack:
- MODULES = [
- "detectron2.modeling.roi_heads.fast_rcnn",
- "detectron2.modeling.roi_heads.mask_head",
- "detectron2.modeling.roi_heads.keypoint_head",
- ] + list(modules)
- ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
- for m in ctxs:
- m.side_effect = _new_len
- yield
-
-
-def patch_nonscriptable_classes():
- """
- Apply patches on a few nonscriptable detectron2 classes.
- Should not have side-effects on eager usage.
- """
- # __prepare_scriptable__ can also be added to models for easier maintenance.
- # But it complicates the clean model code.
-
- from detectron2.modeling.backbone import ResNet, FPN
-
- # Due to https://github.com/pytorch/pytorch/issues/36061,
- # we change backbone to use ModuleList for scripting.
- # (note: this changes param names in state_dict)
-
- def prepare_resnet(self):
- ret = deepcopy(self)
- ret.stages = nn.ModuleList(ret.stages)
- for k in self.stage_names:
- delattr(ret, k)
- return ret
-
- ResNet.__prepare_scriptable__ = prepare_resnet
-
- def prepare_fpn(self):
- ret = deepcopy(self)
- ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
- ret.output_convs = nn.ModuleList(ret.output_convs)
- for name, _ in self.named_children():
- if name.startswith("fpn_"):
- delattr(ret, name)
- return ret
-
- FPN.__prepare_scriptable__ = prepare_fpn
-
- # Annotate some attributes to be constants for the purpose of scripting,
- # even though they are not constants in eager mode.
- from detectron2.modeling.roi_heads import StandardROIHeads
-
- if hasattr(StandardROIHeads, "__annotations__"):
- # copy first to avoid editing annotations of base class
- StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
- StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
- StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
-
-
-# These patches are not supposed to have side-effects.
-patch_nonscriptable_classes()
-
-
-@contextmanager
-def freeze_training_mode(model):
- """
- A context manager that annotates the "training" attribute of every submodule
- to constant, so that the training codepath in these modules can be
- meta-compiled away. Upon exiting, the annotations are reverted.
- """
- classes = {type(x) for x in model.modules()}
- # __constants__ is the old way to annotate constants and not compatible
- # with __annotations__ .
- classes = {x for x in classes if not hasattr(x, "__constants__")}
- for cls in classes:
- cls.__annotations__["training"] = torch.jit.Final[bool]
- yield
- for cls in classes:
- cls.__annotations__["training"] = bool
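To make the purpose of the removed `patch_instances` helper concrete, here is a small hedged sketch of calling it directly: inside the context manager a statically-typed replacement for `Instances` is generated from the field dict, so the generated setters, `has()`, and `__len__` from the code above can be exercised. The field names used here are arbitrary examples, not a required schema.

```python
import torch
from detectron2.structures import Boxes
from detectron2.export.torchscript_patch import patch_instances

fields = {"pred_boxes": Boxes, "scores": torch.Tensor}
with patch_instances(fields) as ScriptedInstances:
    inst = ScriptedInstances((480, 640))      # image_size; every field starts as None
    inst.pred_boxes = Boxes(torch.rand(3, 4))
    inst.scores = torch.rand(3)
    print(inst.has("scores"), len(inst))      # True 3
```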
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_nms_rotated.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_nms_rotated.py
deleted file mode 100644
index 4b45384892ab2a7cb20871cf19374f1bd08907ce..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_nms_rotated.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from __future__ import absolute_import, division, print_function, unicode_literals
-import numpy as np
-import unittest
-from copy import deepcopy
-import torch
-from torchvision import ops
-
-from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated
-from detectron2.utils.testing import random_boxes
-
-
-def nms_edit_distance(keep1, keep2):
- """
- Compare the "keep" result of two nms call.
- They are allowed to be different in terms of edit distance
- due to floating point precision issues, e.g.,
-    if a box happens to have an IoU of 0.5 with another box,
-    one implementation may choose to keep it while another may discard it.
- """
- keep1, keep2 = keep1.cpu(), keep2.cpu()
- if torch.equal(keep1, keep2):
- # they should be equal most of the time
- return 0
- keep1, keep2 = tuple(keep1), tuple(keep2)
- m, n = len(keep1), len(keep2)
-
- # edit distance with DP
- f = [np.arange(n + 1), np.arange(n + 1)]
- for i in range(m):
- cur_row = i % 2
- other_row = (i + 1) % 2
- f[other_row][0] = i + 1
- for j in range(n):
- f[other_row][j + 1] = (
- f[cur_row][j]
- if keep1[i] == keep2[j]
- else min(min(f[cur_row][j], f[cur_row][j + 1]), f[other_row][j]) + 1
- )
- return f[m % 2][n]
-
-
-class TestNMSRotated(unittest.TestCase):
- def reference_horizontal_nms(self, boxes, scores, iou_threshold):
- """
- Args:
-            boxes (N, 4): boxes in corner form (x1, y1, x2, y2).
-            scores (N,): scores/probabilities for each box.
- iou_threshold: intersection over union threshold.
- Returns:
- picked: a list of indexes of the kept boxes
- """
- picked = []
- _, indexes = scores.sort(descending=True)
- while len(indexes) > 0:
- current = indexes[0]
- picked.append(current.item())
- if len(indexes) == 1:
- break
- current_box = boxes[current, :]
- indexes = indexes[1:]
- rest_boxes = boxes[indexes, :]
- iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1)
- indexes = indexes[iou <= iou_threshold]
-
- return torch.as_tensor(picked)
-
- def _create_tensors(self, N, device="cpu"):
- boxes = random_boxes(N, 200, device=device)
- scores = torch.rand(N, device=device)
- return boxes, scores
-
- def test_batched_nms_rotated_0_degree_cpu(self, device="cpu"):
- N = 2000
- num_classes = 50
- boxes, scores = self._create_tensors(N, device=device)
- idxs = torch.randint(0, num_classes, (N,))
- rotated_boxes = torch.zeros(N, 5, device=device)
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
- err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}"
- for iou in [0.2, 0.5, 0.8]:
- backup = boxes.clone()
- keep_ref = batched_nms(boxes, scores, idxs, iou)
- assert torch.allclose(boxes, backup), "boxes modified by batched_nms"
- backup = rotated_boxes.clone()
- keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou)
- assert torch.allclose(
- rotated_boxes, backup
- ), "rotated_boxes modified by batched_nms_rotated"
- # Occasionally the gap can be large if there are many IOU on the threshold boundary
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 5, err_msg.format(iou))
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_batched_nms_rotated_0_degree_cuda(self):
- self.test_batched_nms_rotated_0_degree_cpu(device="cuda")
-
- def test_nms_rotated_0_degree_cpu(self, device="cpu"):
- N = 1000
- boxes, scores = self._create_tensors(N, device=device)
- rotated_boxes = torch.zeros(N, 5, device=device)
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
- err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
- for iou in [0.2, 0.5, 0.8]:
- keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
- keep = nms_rotated(rotated_boxes, scores, iou)
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_nms_rotated_0_degree_cuda(self):
- self.test_nms_rotated_0_degree_cpu(device="cuda")
-
- def test_nms_rotated_90_degrees_cpu(self):
- N = 1000
- boxes, scores = self._create_tensors(N)
- rotated_boxes = torch.zeros(N, 5)
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
- # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]:
- # widths and heights are intentionally swapped here for 90 degrees case
- # so that the reference horizontal nms could be used
- rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1]
- rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0]
-
- rotated_boxes[:, 4] = torch.ones(N) * 90
- err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
- for iou in [0.2, 0.5, 0.8]:
- keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
- keep = nms_rotated(rotated_boxes, scores, iou)
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
-
- def test_nms_rotated_180_degrees_cpu(self):
- N = 1000
- boxes, scores = self._create_tensors(N)
- rotated_boxes = torch.zeros(N, 5)
- rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0
- rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0
- rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
- rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
- rotated_boxes[:, 4] = torch.ones(N) * 180
- err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}"
- for iou in [0.2, 0.5, 0.8]:
- keep_ref = self.reference_horizontal_nms(boxes, scores, iou)
- keep = nms_rotated(rotated_boxes, scores, iou)
- self.assertLessEqual(nms_edit_distance(keep, keep_ref), 1, err_msg.format(iou))
-
-
-class TestScriptable(unittest.TestCase):
- def setUp(self):
- class TestingModule(torch.nn.Module):
- def forward(self, boxes, scores, threshold):
- return nms_rotated(boxes, scores, threshold)
-
- self.module = TestingModule()
-
- def test_scriptable_cpu(self):
- m = deepcopy(self.module).cpu()
- _ = torch.jit.script(m)
-
- @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available")
- def test_scriptable_cuda(self):
- m = deepcopy(self.module).cuda()
- _ = torch.jit.script(m)
-
-
-if __name__ == "__main__":
- unittest.main()
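As a quick illustration of the API these removed tests exercise, the sketch below calls `nms_rotated` directly on a few hand-made boxes in the same (cx, cy, w, h, angle) layout the tests construct; the numbers are arbitrary.

```python
import torch
from detectron2.layers import nms_rotated

# (cx, cy, w, h, angle in degrees), matching how the deleted tests build rotated_boxes
boxes = torch.tensor([
    [50.0, 50.0, 100.0, 40.0, 0.0],
    [51.0, 50.0, 100.0, 40.0, 0.0],    # near-duplicate of the first box
    [150.0, 150.0, 60.0, 30.0, 45.0],
])
scores = torch.tensor([0.9, 0.8, 0.7])
keep = nms_rotated(boxes, scores, 0.5)
print(keep)  # expect indices [0, 2]: the near-duplicate is suppressed
```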
diff --git a/spaces/Benson/text-generation/Examples/Com.p1.chomp Sms Pro Apk.md b/spaces/Benson/text-generation/Examples/Com.p1.chomp Sms Pro Apk.md
deleted file mode 100644
index ebc6e2370e89788c8ee7a9811f646090e177db95..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Com.p1.chomp Sms Pro Apk.md
+++ /dev/null
@@ -1,104 +0,0 @@
-# What is com.p1.chomp sms pro apk?
-
-If you are looking for a fast, easy, and fun way to send and receive text messages, you may want to try com.p1.chomp sms pro apk. This is a popular messaging app that lets you customize your messages with various themes, fonts, colors, emojis, stickers, GIFs, and more. You can also schedule messages, back up and restore your messages, block spam and unwanted messages, and enjoy many other features that make texting more enjoyable.
-
-## Why use com.p1.chomp sms pro apk?
-
-There are many reasons why you might want to use com.p1.chomp sms pro apk over other messaging apps. Here are some of them:
-
-* It is free and ad-free. You do not have to pay anything or deal with annoying ads to use the app.
-* It is compatible with most Android devices. You can use the app on any device running Android 4.1 or higher.
-* It is easy to use. You can set up the app in minutes and start texting right away.
-* It is customizable. You can choose from hundreds of themes, fonts, colors, and notifications to make your messages look unique.
-* It is fun. You can express yourself with emojis, stickers, GIFs, and other media that you can access from the app's keyboard.
-* It is smart. You can schedule messages, back up and restore your messages, block spam and unwanted messages, and use other features that make texting more convenient.
-
-## How to download and install com.p1.chomp sms pro apk?
-
-To download and install com.p1.chomp sms pro apk on your device, follow these steps:
-
-1. Wait for the download to finish and then open the file.
-2. If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources".
-3. Tap "Install" and wait for the installation to complete.
-
-## How to customize your messages with com.p1.chomp sms pro apk?
-
-To customize your messages with com.p1.chomp sms pro apk, do the following:
-
-1. Open the app and tap the menu icon (three horizontal lines) in the top left corner.
-2. Select "Settings" and then "Customize look".
-3. Here you can choose from various options to change the appearance of your messages, such as theme, font, color, bubble style, notification icon, and more.
-4. Tap the option you want to change and select your preferred choice.
-5. Tap "Save" and then "OK" to apply the changes.
-
-## How to use emojis, stickers, and GIFs with com.p1.chomp sms pro apk?
-
-To use emojis, stickers, and GIFs with com.p1.chomp sms pro apk, do the following:
-
-1. Open the app and tap the "+" icon in the bottom left corner of the keyboard.
-2. You will see a menu with different options for adding media to your messages, such as emojis, stickers, GIFs, photos, videos, voice notes, and more.
-3. Tap the option you want to use and browse through the available choices.
-4. Tap the media you want to send and it will be added to your message.
-5. Tap the "Send" button to send your message with the media.
-
-## How to schedule messages with com.p1.chomp sms pro apk?
-
-To schedule messages with com.p1.chomp sms pro apk, do the following:
-
-1. Open the app and tap the "New message" button in the bottom right corner.
-2. Type the recipient's number or name and write your message.
-3. Tap the clock icon in the top right corner of the keyboard.
-4. You will see a menu with different options for scheduling your message, such as later today, tomorrow, next week, or a custom date and time.
-5. Tap the option you want to use and confirm your choice.
-
-## How to back up and restore your messages with com.p1.chomp sms pro apk?
-
-To back up and restore your messages with com.p1.chomp sms pro apk, do the following:
-
-1. Open the app and tap the menu icon (three horizontal lines) in the top left corner.
-2. Select "Settings" and then "Backup & Restore".
-3. Here you can choose to back up your messages to the cloud or to your device, as well as restore your messages from the cloud or from your device.
-4. Tap the option you want to use and follow the on-screen instructions.
-5. You will need to sign in with your Google account to use the cloud service.
-6. Your messages will be backed up or restored according to your choice.
-
-## How to block spam and unwanted messages with com.p1.chomp sms pro apk?
-
-To block spam and unwanted messages with com.p1.chomp sms pro apk, do the following:
-
-1. Open the app and tap the message you want to block.
-2. Tap the menu icon (three vertical dots) in the top right corner of the message.
-3. Select "Block" and then "OK".
-4. The message will be moved to the "Blocked" folder and you will no longer receive messages from that number or contact.
-5. You can also add numbers or contacts to your blacklist manually by going to "Settings" and then "Blacklist".
-6. You can also enable privacy mode by going to "Settings" and then "Privacy". This will hide your notifications and messages from prying eyes.
-
-## What are the pros and cons of com.p1.chomp sms pro apk?
-
-Like any other app, com.p1.chomp sms pro apk has its pros and cons. Here are some of them:
-
-| Pros | Cons |
-| --- | --- |
-| Free and ad-free | Requires an internet connection |
-| Compatible with most Android devices | Not available for iOS devices |
-| Customizable | May consume more battery or memory |
-| Fun | May not support some media formats |
-| Smart | May not work with some carriers or networks |
-
-If you are looking for some alternatives to com.p1.chomp sms pro apk, you can try these apps:
-
-* Textra SMS: A simple and fast messaging app that also lets you customize your messages with themes, emojis, GIFs, and more.
-* Pulse SMS: A powerful and secure messaging app that also lets you sync your messages across all your devices, including your computer.
-* Mood Messenger: A stylish and smart messaging app that also lets you send animated emojis, voice messages, your location, and more.
-
-## Conclusion
-
-In conclusion, com.p1.chomp sms pro apk is a great messaging app that offers many features and options to make texting more fun and convenient. You can download and install the app for free and enjoy customizing your messages with themes, fonts, colors, emojis, stickers, GIFs, and more. You can also schedule messages, back up and restore your messages, block spam and unwanted messages, and use other features that make texting smarter. However, you should also be aware of the app's cons and limitations, such as requiring an internet connection, not being available for iOS devices, having some bugs or glitches, consuming more battery or memory, not supporting some media formats, and not working with some carriers or networks. You can also try some alternatives to com.p1.chomp sms pro apk if you want to explore other messaging apps. We hope this article has helped you learn more about com.p1.chomp sms pro apk and how to use it. Happy texting!
-
-## Frequently Asked Questions (FAQs)
-
-Q: What is com.p1.chomp sms pro apk?
-
-Q: How do I download and install com.p1.chomp sms pro apk?
-
-A: To download and install com.p1.chomp sms pro apk on your device, go to https://apkdone.com/chomp-sms/ and click the "Download APK" button. Then open the file and tap "Install". You may need to enable "Unknown sources" in your device settings to install the app. Once the installation is complete, you can tap "Open" and enjoy using the app.
-
-Q: How do I customize my messages with com.p1.chomp sms pro apk?
-
-A: To customize your messages with com.p1.chomp sms pro apk, open the app and tap the menu icon (three horizontal lines) in the top left corner. Then select "Settings" and "Customize look". Here you can choose from various options to change the appearance of your messages, such as theme, font, color, bubble style, notification icon, and more. Tap the option you want to change and select your preferred choice, then tap "Save" and "OK" to apply the changes.
-
-Q: How do I use emojis, stickers, and GIFs with com.p1.chomp sms pro apk?
-
-A: To use emojis, stickers, and GIFs with com.p1.chomp sms pro apk, open the app and tap the "+" icon in the bottom left corner of the keyboard. You will see a menu with different options for adding media to your messages, such as emojis, stickers, GIFs, photos, videos, voice notes, and more. Tap the option you want to use and browse through the available choices, tap the media you want to send, and it will be added to your message. Tap the "Send" button to send your message with the media.
-
-Q: How do I schedule messages with com.p1.chomp sms pro apk?
-
-Q: How do I back up and restore my messages with com.p1.chomp sms pro apk?
-
-A: To back up and restore your messages with com.p1.chomp sms pro apk, open the app and tap the menu icon (three horizontal lines) in the top left corner. Then select "Settings" and "Backup & Restore". Here you can choose to back up your messages to the cloud or to your device, as well as restore your messages from the cloud or from your device. Tap the option you want to use and follow the on-screen instructions. You will need to sign in with your Google account to use the cloud service. Your messages will be backed up or restored according to your choice.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Canciones De M Kumaran Hijo De Mahalakshmi.md b/spaces/Benson/text-generation/Examples/Descargar Canciones De M Kumaran Hijo De Mahalakshmi.md
deleted file mode 100644
index 0172791547006b1315f6acf2b38f8a815de1dd97..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Canciones De M Kumaran Hijo De Mahalakshmi.md
+++ /dev/null
@@ -1,102 +0,0 @@
-
-
Cómo descargar canciones de M. Kumaran Son of Mahalakshmi
-
M. Kumaran Son of Mahalakshmi es una película tamil de 2004 dirigida por M. Raja y protagonizada por Jayam Ravi, Asin, Nadhiya y Prakash Raj. La película es un remake de la película telugu Amma Nanna O Tamila Ammayi y cuenta la historia de Kumaran, un kickboxer que va a Malasia para encontrarse con su padre después de la muerte de su madre. La película fue un éxito de taquilla y una de las películas más rentables de 2004.
-
Una de las razones del éxito de la película fue su banda sonora, compuesta por Srikanth Deva, hijo del veterano director musical Deva. La banda sonora consta de seis canciones que van desde el rock, folk, melodía, y géneros de rap. Las canciones cuentan con varios cantantes como Shankar Mahadevan, Karthik, Sadhana Sargam, Tippu, Anuradha Sriram, Ranjith, Premji Amaren y el propio Srikanth Deva. Las canciones son pegadizas, emocionales y motivacionales, y se adaptan perfectamente al tema de la película.
-
descargar canciones de m kumaran hijo de mahalakshmi
Si eres un fan de las canciones de M. Kumaran Son of Mahalakshmi y quieres descargarlas en tu dispositivo, tienes dos opciones: puedes descargarlas gratis o por una tarifa. En este artículo, te mostraremos cómo hacer ambas cosas.
-
Por qué deberías escuchar canciones de M. Kumaran hijo de Mahalakshmi
-
Las canciones están compuestas por Srikanth Deva, un director de música popular en el cine tamil
-
Srikanth Deva es uno de los directores de música más prolíficos en el cine tamil, habiendo compuesto música para más de 100 películas desde su debut en 2002. Es conocido por su versatilidad y capacidad para crear canciones que atraen a diferentes audiencias. Ha trabajado con muchos actores y directores principales en el cine tamil, como Vij
Las canciones cuentan con varios cantantes y géneros, como rock, folk, melodía y rap
-
-
Estas canciones no solo son agradables de escuchar, sino también significativas y relevantes para la historia y los personajes. Mejoran el estado de ánimo y la emoción de la película y la hacen más atractiva y memorable.
-
Las canciones son pegadizas, emocionales y motivadoras, y se adaptan al tema de la película
-
Las canciones de M. Kumaran Son of Mahalakshmi no son solo canciones aleatorias o de relleno. Son pegadizos, emocionales y motivadores, y se adaptan al tema de la película. La película trata sobre el viaje de Kumaran para encontrar su identidad y su lugar en el mundo, así como su relación con su padre, su madre, su novia y sus amigos. Las canciones reflejan estos aspectos y transmiten el mensaje de la película.
-
Por ejemplo, la canción "Amma Nee Sumandha" es un homenaje a la madre de Kumaran, quien lo crió sin ayuda y le enseñó a ser fuerte e independiente. La canción "Rakkamma" es una canción motivacional que anima a Kumaran a superar sus miedos y desafíos y lograr sus sueños. La canción "Aethiree" es una canción pegadiza que muestra la amistad y la diversión entre Kumaran y sus amigos. La canción "Unnai Ninaithu" es una canción emocional que revela los sentimientos de Kumaran por su padre, quien lo abandonó cuando era joven.
-
Estas canciones no solo son pegadizas, emocionales y motivadoras, sino que también se adaptan al tema de la película. Transmiten el mensaje de la película y la hacen más impactante e inspiradora.
-
Cómo descargar canciones de M. Kumaran Son of Mahalakshmi gratis
-
Utilice un sitio web o aplicación confiable y legal que ofrece descargas gratuitas de canciones tamiles
-
-
Algunos de los sitios web o aplicaciones confiables y legales que ofrecen descargas gratuitas de canciones tamiles son:
-
-
Gaana.com: Esta es una de las plataformas de streaming de música online más grandes de la India, con más de 200 millones de usuarios mensuales. Ofrece descargas gratuitas de canciones tamiles, así como de otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el icono de descarga junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo.
-
Hungama.com: Esta es otra popular plataforma de streaming de música en línea en la India, con más de 150 millones de usuarios mensuales. También ofrece descargas gratuitas de canciones tamiles, así como de otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el icono de descarga junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo.
-
-
-
Estos son algunos de los sitios web o aplicaciones confiables y legales que ofrecen descargas gratuitas de canciones tamiles. Sin embargo, siempre debe comprobar los términos y condiciones de cada sitio web o aplicación antes de descargar cualquier canción, y asegúrese de no violar ninguna ley o política.
-
-
Cómo descargar canciones de M. Kumaran Son of Mahalakshmi por una tarifa
-
Utilice un servicio de streaming de pago o tienda en línea que ofrece descargas de alta calidad de canciones Tamil
-
Si desea descargar canciones de M. Kumaran Son of Mahalakshmi por una tarifa, debe usar un servicio de transmisión pagado o una tienda en línea que ofrece descargas de alta calidad de canciones tamiles. Hay muchos servicios de streaming de pago y tiendas en línea que ofrecen descargas de alta calidad de canciones tamiles, pero no todos ellos valen su dinero o tiempo. Algunos de ellos pueden cobrarle demasiado o muy poco, algunos de ellos pueden tener un mal servicio al cliente o soporte técnico, algunos de ellos pueden tener opciones o características limitadas, y algunos de ellos pueden tener productos de baja calidad o falsos. Por lo tanto, debe tener cuidado y elegir un servicio o tienda que vale la pena su dinero y tiempo, y que proporciona productos y servicios de alta calidad.
-
Algunos de los servicios de streaming de pago y tiendas en línea que ofrecen descargas de alta calidad de canciones tamiles son:
-
-
-
iTunes: Esta es una de las tiendas de música en línea más populares y ampliamente utilizadas del mundo, con más de 60 millones de canciones disponibles para comprar y descargar. Ofrece descargas de alta calidad de canciones tamiles, así como otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el botón comprar junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo. Sin embargo, necesitas registrarte para una cuenta y pagar por cada canción que quieras descargar. El precio de cada canción varía de $0.69 a $1.29, dependiendo de la popularidad y la demanda de la canción.
-
Saavn: Esta es una de las plataformas de streaming de música más populares y ampliamente utilizadas en la India, con más de 100 millones de usuarios mensuales. Ofrece descargas de alta calidad de canciones tamiles, así como otros idiomas y géneros. Puedes buscar canciones de M. Kumaran Son of Mahalakshmi escribiendo el nombre de la película o el nombre de la canción en la barra de búsqueda. También puede navegar a través de las categorías y listas de reproducción para encontrar las canciones que desea. Puede descargar las canciones haciendo clic en el icono de descarga junto al título de la canción. Puede elegir el formato y la calidad que prefiera, como MP3, AAC o HD. También puede escuchar las canciones en línea o sin conexión en su dispositivo. Sin embargo, debe registrarse para obtener una cuenta y pagar una suscripción para acceder a la función de descarga. Los planes de suscripción varían de $1.99 a $9.99 por mes, dependiendo de las características y beneficios que desee.
-
-
-
How to enjoy the songs from M. Kumaran Son of Mahalakshmi after downloading them
-
Transfer the songs to your preferred music player or device
-
After downloading songs from M. Kumaran Son of Mahalakshmi, you need to transfer them to your preferred music player or device so you can enjoy them anytime and anywhere you want. There are different ways to transfer the songs, depending on the source and destination of the transfer.
-
For example, if you downloaded the songs from Gaana.com or Hungama.com, you can transfer them to your smartphone or tablet using a USB cable or a wireless connection. If you downloaded them from iTunes, you can transfer them to your iPhone, iPad, iPod, or Mac using iTunes Sync or iCloud Music Library. If you downloaded them from Saavn, you can transfer them to your smartphone or tablet using a USB cable or a wireless connection.
-
You should always follow the instructions and guidelines of each website or app when transferring the songs, and make sure you do not lose or damage any files during the process.
-
Create a playlist of your favorite songs from the movie
-
After transferring songs from M. Kumaran Son of Mahalakshmi to your preferred music player or device, you can create a playlist of your favorite songs from the movie. A playlist is a collection of songs that you can play in sequential or shuffle mode. Creating a playlist of your favorite songs from the movie can help you enjoy them more and organize them better. You can also share your playlist with your friends or family, or listen to other people's playlists from the same movie.
-
-
You should always follow the instructions and guidelines of each music player or device when creating a playlist, and make sure to save and update your playlist regularly.
-
Listen to the songs anytime and anywhere you want
-
After creating a playlist of your favorite songs from M. Kumaran Son of Mahalakshmi, you can listen to the songs anytime and anywhere you want. You can listen to the songs online or offline, depending on your internet connection and data plan. You can also adjust the volume, skip, repeat, or shuffle the songs, depending on your preference and mood. You can also sing along, dance, or just relax and enjoy the songs.
-
Listening to songs from M. Kumaran Son of Mahalakshmi can make you feel happy, sad, excited, nostalgic, or inspired, depending on the song and the situation. The songs can also remind you of the movie and its characters and make you appreciate the story and its message more. The songs can also help you learn more about Tamil culture and language and enrich your knowledge and experience.
-
Conclusion
-
M. Kumaran Son of Mahalakshmi is a 2004 Tamil movie with a great soundtrack composed by Srikanth Deva. The soundtrack consists of six songs featuring various singers and genres, such as rock, folk, melody, and rap. The songs are catchy, emotional, and motivational, and they suit the theme of the movie perfectly.
-
-
After downloading songs from M. Kumaran Son of Mahalakshmi to your device, you can enjoy them anytime and anywhere you want. You can transfer the songs to your preferred music player or device, create a playlist of your favorite songs from the movie, and listen to the songs online or offline. You can also share your playlist with your friends or family, or listen to other people's playlists from the same movie.
-
Listening to songs from M. Kumaran Son of Mahalakshmi can make you feel happy, sad, excited, nostalgic, or inspired, depending on the song and the situation. The songs can also remind you of the movie and its characters and make you appreciate the story and its message more. The songs can also help you learn more about Tamil culture and language and enrich your knowledge and experience.
-
So, what are you waiting for? Download the songs from M. Kumaran Son of Mahalakshmi today and enjoy them to the fullest!
-
Frequently Asked Questions
-
Q: What are the names of the six songs from M. Kumaran Son of Mahalakshmi?
-
A: The names of the six songs from M. Kumaran Son of Mahalakshmi are:
-
-
Ayyo Ayyo
-
Yaaru Yaaru
-
Neeye Neeye
-
Chennai Senthamizh
-
Amma Nee Sumandha
-
Rakkamma
-
-
Q: Who are the singers of the six songs from M. Kumaran Son of Mahalakshmi?
-
A: The singers of the six songs from M. Kumaran Son of Mahalakshmi are:
-
-
Ayyo Ayyo: Shankar Mahadevan and Karthik
-
Yaaru Yaaru: Tippu and Anuradha Sriram
-
Neeye Neeye: Karthik and Sadhana Sargam
-
Chennai Senthamizh: Ranjith, Premji Amaren, and Srikanth Deva
-
Amma Nee Sumandha: Srikanth Deva
-
Rakkamma: Tippu and Anuradha Sriram
-
-
Q: Where can I watch M. Kumaran Son of Mahalakshmi online?
-
A: You can watch M. Kumaran Son of Mahalakshmi online on several streaming platforms, such as:
-
-
-
Amazon Prime Video: This is a popular streaming platform that offers a variety of movies and shows in different languages and genres. You can watch M. Kumaran Son of Mahalakshmi online on Amazon Prime Video for a fee, with or without ads. You can also download the movie to your device for offline viewing.
-
-
Q: How can I learn more about Tamil culture and language?
-
A: There are many ways to learn more about Tamil culture and language, such as:
-
-
Reading books, magazines, newspapers, blogs, or websites that are written in Tamil or about Tamil topics.
-
Watching movies, shows, documentaries, or videos that are made in Tamil or about Tamil topics.
-
Listening to podcasts, radio stations, music albums, or songs that are in Tamil or about Tamil topics.
-
Taking courses, classes, lessons, or tutorials that teach the Tamil language or culture.
-
Joining clubs, groups, communities, or forums that discuss the Tamil language or culture.
-
Visiting places, events, festivals, or attractions that showcase the Tamil language or culture.
-
Meeting people, friends, relatives, or neighbors who speak Tamil or know Tamil culture.
-
-
Q: What are some other movies that have good Tamil songs?
-
A: There are many movies that have good Tamil songs, but some of the most popular and acclaimed ones are:
-
-
-
Roja: This is a 1992 romantic movie directed by Mani Ratnam and starring Arvind Swamy and Madhoo. The movie is about a woman who tries to rescue her husband after he is kidnapped by terrorists in Kashmir. The movie has a beautiful soundtrack composed by A.R. Rahman, with songs such as "Kadhal Rojave", "Chinna Chinna Aasai", "Pudhu Vellai Mazhai", and "Rukkumani Rukkumani".
-
3: This is a 2012 romantic movie directed by Aishwarya R. Dhanush and starring Dhanush and Shruti Haasan. The movie is about a couple facing several challenges in their relationship due to bipolar disorder and death. The movie has a catchy soundtrack composed by Anirudh Ravichander, with songs such as "Why This Kolaveri Di", "Idhazhin Oram", "Nee Paartha Vizhigal", and "Po Nee Po".
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/BetterAPI/BetterChat/src/lib/utils/randomUuid.ts b/spaces/BetterAPI/BetterChat/src/lib/utils/randomUuid.ts
deleted file mode 100644
index 9d536365c57659305ad28d6fc06b89d77ab337ab..0000000000000000000000000000000000000000
--- a/spaces/BetterAPI/BetterChat/src/lib/utils/randomUuid.ts
+++ /dev/null
@@ -1,14 +0,0 @@
-type UUID = ReturnType<typeof crypto.randomUUID>;
-
-export function randomUUID(): UUID {
- // Only on old safari / ios
- if (!("randomUUID" in crypto)) {
- return "10000000-1000-4000-8000-100000000000".replace(/[018]/g, (c) =>
- (
- Number(c) ^
- (crypto.getRandomValues(new Uint8Array(1))[0] & (15 >> (Number(c) / 4)))
- ).toString(16)
- ) as UUID;
- }
- return crypto.randomUUID();
-}
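
The fallback above rebuilds a version-4 UUID by rewriting a fixed template string: every '0', '1', or '8' becomes a random hex digit, the masking on '8' keeps the RFC 4122 variant bits at 10xx, and the literal '4' in the third group keeps the version nibble fixed. A minimal Python sketch of the same trick, using only the standard library (`random_uuid_v4` is an illustrative name, not part of the file above):

```python
import secrets
import uuid

def random_uuid_v4() -> str:
    # Mirror of the template-replacement fallback: '0'/'1' -> random nibble,
    # '8' -> 8..b (variant bits 10xx); the '4' in the third group is untouched.
    template = "10000000-1000-4000-8000-100000000000"
    out = []
    for ch in template:
        if ch in "018":
            c = int(ch)
            r = secrets.token_bytes(1)[0]
            out.append(format(c ^ (r & (15 >> (c // 4))), "x"))
        else:
            out.append(ch)
    return "".join(out)

print(random_uuid_v4())  # a fresh v4 UUID string each call
print(uuid.uuid4())      # the stdlib equivalent on any modern Python
```
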
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py
deleted file mode 100644
index 65c043c87eff27e9405316fdbc0c695f2b347441..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/metadata/importlib/_dists.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import email.message
-import importlib.metadata
-import os
-import pathlib
-import zipfile
-from typing import (
- Collection,
- Dict,
- Iterable,
- Iterator,
- Mapping,
- Optional,
- Sequence,
- cast,
-)
-
-from pip._vendor.packaging.requirements import Requirement
-from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
-from pip._vendor.packaging.version import parse as parse_version
-
-from pip._internal.exceptions import InvalidWheel, UnsupportedWheel
-from pip._internal.metadata.base import (
- BaseDistribution,
- BaseEntryPoint,
- DistributionVersion,
- InfoPath,
- Wheel,
-)
-from pip._internal.utils.misc import normalize_path
-from pip._internal.utils.packaging import safe_extra
-from pip._internal.utils.temp_dir import TempDirectory
-from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
-
-from ._compat import BasePath, get_dist_name
-
-
-class WheelDistribution(importlib.metadata.Distribution):
- """An ``importlib.metadata.Distribution`` read from a wheel.
-
- Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``,
- its implementation is too "lazy" for pip's needs (we can't keep the ZipFile
- handle open for the entire lifetime of the distribution object).
-
-    This implementation eagerly reads the entire metadata directory into
- memory instead, and operates from that.
- """
-
- def __init__(
- self,
- files: Mapping[pathlib.PurePosixPath, bytes],
- info_location: pathlib.PurePosixPath,
- ) -> None:
- self._files = files
- self.info_location = info_location
-
- @classmethod
- def from_zipfile(
- cls,
- zf: zipfile.ZipFile,
- name: str,
- location: str,
- ) -> "WheelDistribution":
- info_dir, _ = parse_wheel(zf, name)
- paths = (
- (name, pathlib.PurePosixPath(name.split("/", 1)[-1]))
- for name in zf.namelist()
- if name.startswith(f"{info_dir}/")
- )
- files = {
- relpath: read_wheel_metadata_file(zf, fullpath)
- for fullpath, relpath in paths
- }
- info_location = pathlib.PurePosixPath(location, info_dir)
- return cls(files, info_location)
-
- def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
- # Only allow iterating through the metadata directory.
- if pathlib.PurePosixPath(str(path)) in self._files:
- return iter(self._files)
- raise FileNotFoundError(path)
-
- def read_text(self, filename: str) -> Optional[str]:
- try:
- data = self._files[pathlib.PurePosixPath(filename)]
- except KeyError:
- return None
- try:
- text = data.decode("utf-8")
- except UnicodeDecodeError as e:
- wheel = self.info_location.parent
- error = f"Error decoding metadata for {wheel}: {e} in {filename} file"
- raise UnsupportedWheel(error)
- return text
-
-
-class Distribution(BaseDistribution):
- def __init__(
- self,
- dist: importlib.metadata.Distribution,
- info_location: Optional[BasePath],
- installed_location: Optional[BasePath],
- ) -> None:
- self._dist = dist
- self._info_location = info_location
- self._installed_location = installed_location
-
- @classmethod
- def from_directory(cls, directory: str) -> BaseDistribution:
- info_location = pathlib.Path(directory)
- dist = importlib.metadata.Distribution.at(info_location)
- return cls(dist, info_location, info_location.parent)
-
- @classmethod
- def from_metadata_file_contents(
- cls,
- metadata_contents: bytes,
- filename: str,
- project_name: str,
- ) -> BaseDistribution:
- # Generate temp dir to contain the metadata file, and write the file contents.
- temp_dir = pathlib.Path(
- TempDirectory(kind="metadata", globally_managed=True).path
- )
- metadata_path = temp_dir / "METADATA"
- metadata_path.write_bytes(metadata_contents)
- # Construct dist pointing to the newly created directory.
- dist = importlib.metadata.Distribution.at(metadata_path.parent)
- return cls(dist, metadata_path.parent, None)
-
- @classmethod
- def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
- try:
- with wheel.as_zipfile() as zf:
- dist = WheelDistribution.from_zipfile(zf, name, wheel.location)
- except zipfile.BadZipFile as e:
- raise InvalidWheel(wheel.location, name) from e
- except UnsupportedWheel as e:
- raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
- return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location))
-
- @property
- def location(self) -> Optional[str]:
- if self._info_location is None:
- return None
- return str(self._info_location.parent)
-
- @property
- def info_location(self) -> Optional[str]:
- if self._info_location is None:
- return None
- return str(self._info_location)
-
- @property
- def installed_location(self) -> Optional[str]:
- if self._installed_location is None:
- return None
- return normalize_path(str(self._installed_location))
-
- def _get_dist_name_from_location(self) -> Optional[str]:
- """Try to get the name from the metadata directory name.
-
- This is much faster than reading metadata.
- """
- if self._info_location is None:
- return None
- stem, suffix = os.path.splitext(self._info_location.name)
- if suffix not in (".dist-info", ".egg-info"):
- return None
- return stem.split("-", 1)[0]
-
- @property
- def canonical_name(self) -> NormalizedName:
- name = self._get_dist_name_from_location() or get_dist_name(self._dist)
- return canonicalize_name(name)
-
- @property
- def version(self) -> DistributionVersion:
- return parse_version(self._dist.version)
-
- def is_file(self, path: InfoPath) -> bool:
- return self._dist.read_text(str(path)) is not None
-
- def iter_distutils_script_names(self) -> Iterator[str]:
- # A distutils installation is always "flat" (not in e.g. egg form), so
- # if this distribution's info location is NOT a pathlib.Path (but e.g.
- # zipfile.Path), it can never contain any distutils scripts.
- if not isinstance(self._info_location, pathlib.Path):
- return
- for child in self._info_location.joinpath("scripts").iterdir():
- yield child.name
-
- def read_text(self, path: InfoPath) -> str:
- content = self._dist.read_text(str(path))
- if content is None:
- raise FileNotFoundError(path)
- return content
-
- def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
-        # importlib.metadata's EntryPoint structure satisfies BaseEntryPoint.
- return self._dist.entry_points
-
- def _metadata_impl(self) -> email.message.Message:
- # From Python 3.10+, importlib.metadata declares PackageMetadata as the
- # return type. This protocol is unfortunately a disaster now and misses
- # a ton of fields that we need, including get() and get_payload(). We
- # rely on the implementation that the object is actually a Message now,
- # until upstream can improve the protocol. (python/cpython#94952)
- return cast(email.message.Message, self._dist.metadata)
-
- def iter_provided_extras(self) -> Iterable[str]:
- return (
- safe_extra(extra) for extra in self.metadata.get_all("Provides-Extra", [])
- )
-
- def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
- contexts: Sequence[Dict[str, str]] = [{"extra": safe_extra(e)} for e in extras]
- for req_string in self.metadata.get_all("Requires-Dist", []):
- req = Requirement(req_string)
- if not req.marker:
- yield req
- elif not extras and req.marker.evaluate({"extra": ""}):
- yield req
- elif any(req.marker.evaluate(context) for context in contexts):
- yield req
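
The `WheelDistribution` class above exists because `zipfile.Path` is too lazy for pip's needs: the ZipFile handle cannot stay open for the lifetime of the distribution object, so the whole `.dist-info` directory is read into memory up front. A rough standalone sketch of that eager-read idea, outside pip's API (`read_wheel_metadata` is an invented helper, assuming only the standard library):

```python
import pathlib
import zipfile
from email.parser import Parser

def read_wheel_metadata(wheel_path: str) -> dict:
    """Eagerly load every *.dist-info file from a wheel, then parse METADATA."""
    with zipfile.ZipFile(wheel_path) as zf:
        # Locate the .dist-info directory via its METADATA file.
        info_dir = next(
            name.split("/", 1)[0]
            for name in zf.namelist()
            if name.endswith(".dist-info/METADATA")
        )
        # Read the whole metadata directory while the handle is still open.
        files = {
            pathlib.PurePosixPath(name.split("/", 1)[-1]): zf.read(name)
            for name in zf.namelist()
            if name.startswith(f"{info_dir}/")
        }
    # The ZipFile is closed here; everything needed is already in memory.
    msg = Parser().parsestr(files[pathlib.PurePosixPath("METADATA")].decode("utf-8"))
    return {"name": msg["Name"], "version": msg["Version"],
            "files": sorted(str(p) for p in files)}

# Example: read_wheel_metadata("requests-2.31.0-py3-none-any.whl")
```
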
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/scripts.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/scripts.py
deleted file mode 100644
index d2706242b8aac125a66450d5ce8dcd3395336182..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/scripts.py
+++ /dev/null
@@ -1,437 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2013-2015 Vinay Sajip.
-# Licensed to the Python Software Foundation under a contributor agreement.
-# See LICENSE.txt and CONTRIBUTORS.txt.
-#
-from io import BytesIO
-import logging
-import os
-import re
-import struct
-import sys
-import time
-from zipfile import ZipInfo
-
-from .compat import sysconfig, detect_encoding, ZipFile
-from .resources import finder
-from .util import (FileOperator, get_export_entry, convert_path,
- get_executable, get_platform, in_venv)
-
-logger = logging.getLogger(__name__)
-
-_DEFAULT_MANIFEST = '''
-<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
- manifestVersion="1.0">
-  <assemblyIdentity version="1.0.0.0"
- processorArchitecture="X86"
- name="%s"
- type="win32"/>
-
-  <!-- Identify the application security requirements. -->
-  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
-    <security>
-      <requestedPrivileges>
-        <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
-      </requestedPrivileges>
-    </security>
-  </trustInfo>
-</assembly>'''.strip()
-
-# check if Python is called on the first line with this expression
-FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
-SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
-import re
-import sys
-from %(module)s import %(import_name)s
-if __name__ == '__main__':
- sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
- sys.exit(%(func)s())
-'''
-
-
-def enquote_executable(executable):
- if ' ' in executable:
- # make sure we quote only the executable in case of env
- # for example /usr/bin/env "/dir with spaces/bin/jython"
- # instead of "/usr/bin/env /dir with spaces/bin/jython"
-        # otherwise the whole command would be quoted
- if executable.startswith('/usr/bin/env '):
- env, _executable = executable.split(' ', 1)
- if ' ' in _executable and not _executable.startswith('"'):
- executable = '%s "%s"' % (env, _executable)
- else:
- if not executable.startswith('"'):
- executable = '"%s"' % executable
- return executable
-
-# Keep the old name around (for now), as there is at least one project using it!
-_enquote_executable = enquote_executable
-
-class ScriptMaker(object):
- """
- A class to copy or create scripts from source scripts or callable
- specifications.
- """
- script_template = SCRIPT_TEMPLATE
-
- executable = None # for shebangs
-
- def __init__(self, source_dir, target_dir, add_launchers=True,
- dry_run=False, fileop=None):
- self.source_dir = source_dir
- self.target_dir = target_dir
- self.add_launchers = add_launchers
- self.force = False
- self.clobber = False
- # It only makes sense to set mode bits on POSIX.
- self.set_mode = (os.name == 'posix') or (os.name == 'java' and
- os._name == 'posix')
- self.variants = set(('', 'X.Y'))
- self._fileop = fileop or FileOperator(dry_run)
-
- self._is_nt = os.name == 'nt' or (
- os.name == 'java' and os._name == 'nt')
- self.version_info = sys.version_info
-
- def _get_alternate_executable(self, executable, options):
- if options.get('gui', False) and self._is_nt: # pragma: no cover
- dn, fn = os.path.split(executable)
- fn = fn.replace('python', 'pythonw')
- executable = os.path.join(dn, fn)
- return executable
-
- if sys.platform.startswith('java'): # pragma: no cover
- def _is_shell(self, executable):
- """
- Determine if the specified executable is a script
- (contains a #! line)
- """
- try:
- with open(executable) as fp:
- return fp.read(2) == '#!'
- except (OSError, IOError):
- logger.warning('Failed to open %s', executable)
- return False
-
- def _fix_jython_executable(self, executable):
- if self._is_shell(executable):
- # Workaround for Jython is not needed on Linux systems.
- import java
-
- if java.lang.System.getProperty('os.name') == 'Linux':
- return executable
- elif executable.lower().endswith('jython.exe'):
- # Use wrapper exe for Jython on Windows
- return executable
- return '/usr/bin/env %s' % executable
-
- def _build_shebang(self, executable, post_interp):
- """
- Build a shebang line. In the simple case (on Windows, or a shebang line
- which is not too long or contains spaces) use a simple formulation for
- the shebang. Otherwise, use /bin/sh as the executable, with a contrived
- shebang which allows the script to run either under Python or sh, using
- suitable quoting. Thanks to Harald Nordgren for his input.
-
- See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
- https://hg.mozilla.org/mozilla-central/file/tip/mach
- """
- if os.name != 'posix':
- simple_shebang = True
- else:
- # Add 3 for '#!' prefix and newline suffix.
- shebang_length = len(executable) + len(post_interp) + 3
- if sys.platform == 'darwin':
- max_shebang_length = 512
- else:
- max_shebang_length = 127
- simple_shebang = ((b' ' not in executable) and
- (shebang_length <= max_shebang_length))
-
- if simple_shebang:
- result = b'#!' + executable + post_interp + b'\n'
- else:
- result = b'#!/bin/sh\n'
- result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
- result += b"' '''"
- return result
-
- def _get_shebang(self, encoding, post_interp=b'', options=None):
- enquote = True
- if self.executable:
- executable = self.executable
- enquote = False # assume this will be taken care of
- elif not sysconfig.is_python_build():
- executable = get_executable()
- elif in_venv(): # pragma: no cover
- executable = os.path.join(sysconfig.get_path('scripts'),
- 'python%s' % sysconfig.get_config_var('EXE'))
- else: # pragma: no cover
- executable = os.path.join(
- sysconfig.get_config_var('BINDIR'),
- 'python%s%s' % (sysconfig.get_config_var('VERSION'),
- sysconfig.get_config_var('EXE')))
- if not os.path.isfile(executable):
- # for Python builds from source on Windows, no Python executables with
- # a version suffix are created, so we use python.exe
- executable = os.path.join(sysconfig.get_config_var('BINDIR'),
- 'python%s' % (sysconfig.get_config_var('EXE')))
- if options:
- executable = self._get_alternate_executable(executable, options)
-
- if sys.platform.startswith('java'): # pragma: no cover
- executable = self._fix_jython_executable(executable)
-
- # Normalise case for Windows - COMMENTED OUT
- # executable = os.path.normcase(executable)
- # N.B. The normalising operation above has been commented out: See
- # issue #124. Although paths in Windows are generally case-insensitive,
- # they aren't always. For example, a path containing a ẞ (which is a
- # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
- # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by
- # Windows as equivalent in path names.
-
- # If the user didn't specify an executable, it may be necessary to
- # cater for executable paths with spaces (not uncommon on Windows)
- if enquote:
- executable = enquote_executable(executable)
- # Issue #51: don't use fsencode, since we later try to
- # check that the shebang is decodable using utf-8.
- executable = executable.encode('utf-8')
- # in case of IronPython, play safe and enable frames support
- if (sys.platform == 'cli' and '-X:Frames' not in post_interp
- and '-X:FullFrames' not in post_interp): # pragma: no cover
- post_interp += b' -X:Frames'
- shebang = self._build_shebang(executable, post_interp)
- # Python parser starts to read a script using UTF-8 until
- # it gets a #coding:xxx cookie. The shebang has to be the
- # first line of a file, the #coding:xxx cookie cannot be
- # written before. So the shebang has to be decodable from
- # UTF-8.
- try:
- shebang.decode('utf-8')
- except UnicodeDecodeError: # pragma: no cover
- raise ValueError(
- 'The shebang (%r) is not decodable from utf-8' % shebang)
- # If the script is encoded to a custom encoding (use a
- # #coding:xxx cookie), the shebang has to be decodable from
- # the script encoding too.
- if encoding != 'utf-8':
- try:
- shebang.decode(encoding)
- except UnicodeDecodeError: # pragma: no cover
- raise ValueError(
- 'The shebang (%r) is not decodable '
- 'from the script encoding (%r)' % (shebang, encoding))
- return shebang
-
- def _get_script_text(self, entry):
- return self.script_template % dict(module=entry.prefix,
- import_name=entry.suffix.split('.')[0],
- func=entry.suffix)
-
- manifest = _DEFAULT_MANIFEST
-
- def get_manifest(self, exename):
- base = os.path.basename(exename)
- return self.manifest % base
-
- def _write_script(self, names, shebang, script_bytes, filenames, ext):
- use_launcher = self.add_launchers and self._is_nt
- linesep = os.linesep.encode('utf-8')
- if not shebang.endswith(linesep):
- shebang += linesep
- if not use_launcher:
- script_bytes = shebang + script_bytes
- else: # pragma: no cover
- if ext == 'py':
- launcher = self._get_launcher('t')
- else:
- launcher = self._get_launcher('w')
- stream = BytesIO()
- with ZipFile(stream, 'w') as zf:
- source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
- if source_date_epoch:
- date_time = time.gmtime(int(source_date_epoch))[:6]
- zinfo = ZipInfo(filename='__main__.py', date_time=date_time)
- zf.writestr(zinfo, script_bytes)
- else:
- zf.writestr('__main__.py', script_bytes)
- zip_data = stream.getvalue()
- script_bytes = launcher + shebang + zip_data
- for name in names:
- outname = os.path.join(self.target_dir, name)
- if use_launcher: # pragma: no cover
- n, e = os.path.splitext(outname)
- if e.startswith('.py'):
- outname = n
- outname = '%s.exe' % outname
- try:
- self._fileop.write_binary_file(outname, script_bytes)
- except Exception:
- # Failed writing an executable - it might be in use.
- logger.warning('Failed to write executable - trying to '
- 'use .deleteme logic')
- dfname = '%s.deleteme' % outname
- if os.path.exists(dfname):
- os.remove(dfname) # Not allowed to fail here
- os.rename(outname, dfname) # nor here
- self._fileop.write_binary_file(outname, script_bytes)
- logger.debug('Able to replace executable using '
- '.deleteme logic')
- try:
- os.remove(dfname)
- except Exception:
- pass # still in use - ignore error
- else:
- if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
- outname = '%s.%s' % (outname, ext)
- if os.path.exists(outname) and not self.clobber:
- logger.warning('Skipping existing file %s', outname)
- continue
- self._fileop.write_binary_file(outname, script_bytes)
- if self.set_mode:
- self._fileop.set_executable_mode([outname])
- filenames.append(outname)
-
- variant_separator = '-'
-
- def get_script_filenames(self, name):
- result = set()
- if '' in self.variants:
- result.add(name)
- if 'X' in self.variants:
- result.add('%s%s' % (name, self.version_info[0]))
- if 'X.Y' in self.variants:
- result.add('%s%s%s.%s' % (name, self.variant_separator,
- self.version_info[0], self.version_info[1]))
- return result
-
- def _make_script(self, entry, filenames, options=None):
- post_interp = b''
- if options:
- args = options.get('interpreter_args', [])
- if args:
- args = ' %s' % ' '.join(args)
- post_interp = args.encode('utf-8')
- shebang = self._get_shebang('utf-8', post_interp, options=options)
- script = self._get_script_text(entry).encode('utf-8')
- scriptnames = self.get_script_filenames(entry.name)
- if options and options.get('gui', False):
- ext = 'pyw'
- else:
- ext = 'py'
- self._write_script(scriptnames, shebang, script, filenames, ext)
-
- def _copy_script(self, script, filenames):
- adjust = False
- script = os.path.join(self.source_dir, convert_path(script))
- outname = os.path.join(self.target_dir, os.path.basename(script))
- if not self.force and not self._fileop.newer(script, outname):
- logger.debug('not copying %s (up-to-date)', script)
- return
-
- # Always open the file, but ignore failures in dry-run mode --
- # that way, we'll get accurate feedback if we can read the
- # script.
- try:
- f = open(script, 'rb')
- except IOError: # pragma: no cover
- if not self.dry_run:
- raise
- f = None
- else:
- first_line = f.readline()
- if not first_line: # pragma: no cover
- logger.warning('%s is an empty file (skipping)', script)
- return
-
- match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
- if match:
- adjust = True
- post_interp = match.group(1) or b''
-
- if not adjust:
- if f:
- f.close()
- self._fileop.copy_file(script, outname)
- if self.set_mode:
- self._fileop.set_executable_mode([outname])
- filenames.append(outname)
- else:
- logger.info('copying and adjusting %s -> %s', script,
- self.target_dir)
- if not self._fileop.dry_run:
- encoding, lines = detect_encoding(f.readline)
- f.seek(0)
- shebang = self._get_shebang(encoding, post_interp)
- if b'pythonw' in first_line: # pragma: no cover
- ext = 'pyw'
- else:
- ext = 'py'
- n = os.path.basename(outname)
- self._write_script([n], shebang, f.read(), filenames, ext)
- if f:
- f.close()
-
- @property
- def dry_run(self):
- return self._fileop.dry_run
-
- @dry_run.setter
- def dry_run(self, value):
- self._fileop.dry_run = value
-
- if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
- # Executable launcher support.
- # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
-
- def _get_launcher(self, kind):
- if struct.calcsize('P') == 8: # 64-bit
- bits = '64'
- else:
- bits = '32'
- platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
- name = '%s%s%s.exe' % (kind, bits, platform_suffix)
- # Issue 31: don't hardcode an absolute package name, but
- # determine it relative to the current package
- distlib_package = __name__.rsplit('.', 1)[0]
- resource = finder(distlib_package).find(name)
- if not resource:
- msg = ('Unable to find resource %s in package %s' % (name,
- distlib_package))
- raise ValueError(msg)
- return resource.bytes
-
- # Public API follows
-
- def make(self, specification, options=None):
- """
- Make a script.
-
- :param specification: The specification, which is either a valid export
- entry specification (to make a script from a
- callable) or a filename (to make a script by
- copying from a source location).
- :param options: A dictionary of options controlling script generation.
- :return: A list of all absolute pathnames written to.
- """
- filenames = []
- entry = get_export_entry(specification)
- if entry is None:
- self._copy_script(specification, filenames)
- else:
- self._make_script(entry, filenames, options=options)
- return filenames
-
- def make_multiple(self, specifications, options=None):
- """
- Take a list of specifications and make scripts from them,
-        Take a list of specifications and make scripts from them.
-        :param specifications: A list of specifications.
-        :return: A list of all absolute pathnames written to.
- filenames = []
- for specification in specifications:
- filenames.extend(self.make(specification, options))
- return filenames
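
`ScriptMaker._build_shebang` above chooses between a plain `#!` line and the `/bin/sh` exec trick based on the length of the interpreter path and whether it contains spaces. A simplified sketch of that decision, under the same limits quoted in the docstring (`build_shebang` is my own standalone function, not distlib's API):

```python
import os
import sys

def build_shebang(executable: bytes, post_interp: bytes = b"") -> bytes:
    # Short, space-free interpreter paths get a plain shebang; anything else
    # falls back to a /bin/sh wrapper that re-execs the real interpreter.
    if os.name != "posix":
        simple = True
    else:
        length = len(executable) + len(post_interp) + 3   # '#!' plus newline
        limit = 512 if sys.platform == "darwin" else 127  # kernel shebang limits
        simple = (b" " not in executable) and (length <= limit)
    if simple:
        return b"#!" + executable + post_interp + b"\n"
    return (b"#!/bin/sh\n"
            + b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
            + b"' '''")

print(build_shebang(b"/usr/bin/python3"))
print(build_shebang(b"/opt/my venv/bin/python"))  # spaced path -> /bin/sh wrapper
```
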
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/wheel.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/wheel.py
deleted file mode 100644
index 527ed3b23306a3822388520115bafaf3eabb5024..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/wheel.py
+++ /dev/null
@@ -1,222 +0,0 @@
-"""Wheels support."""
-
-import email
-import itertools
-import os
-import posixpath
-import re
-import zipfile
-import contextlib
-
-from distutils.util import get_platform
-
-import pkg_resources
-import setuptools
-from pkg_resources import parse_version
-from setuptools.extern.packaging.tags import sys_tags
-from setuptools.extern.packaging.utils import canonicalize_name
-from setuptools.command.egg_info import write_requirements
-from setuptools.archive_util import _unpack_zipfile_obj
-
-
-WHEEL_NAME = re.compile(
-    r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
-    ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
- )\.whl$""",
- re.VERBOSE).match
-
-NAMESPACE_PACKAGE_INIT = \
- "__import__('pkg_resources').declare_namespace(__name__)\n"
-
-
-def unpack(src_dir, dst_dir):
- '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
- for dirpath, dirnames, filenames in os.walk(src_dir):
- subdir = os.path.relpath(dirpath, src_dir)
- for f in filenames:
- src = os.path.join(dirpath, f)
- dst = os.path.join(dst_dir, subdir, f)
- os.renames(src, dst)
- for n, d in reversed(list(enumerate(dirnames))):
- src = os.path.join(dirpath, d)
- dst = os.path.join(dst_dir, subdir, d)
- if not os.path.exists(dst):
- # Directory does not exist in destination,
- # rename it and prune it from os.walk list.
- os.renames(src, dst)
- del dirnames[n]
- # Cleanup.
- for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
- assert not filenames
- os.rmdir(dirpath)
-
-
-@contextlib.contextmanager
-def disable_info_traces():
- """
- Temporarily disable info traces.
- """
- from distutils import log
- saved = log.set_threshold(log.WARN)
- try:
- yield
- finally:
- log.set_threshold(saved)
-
-
-class Wheel:
-
- def __init__(self, filename):
- match = WHEEL_NAME(os.path.basename(filename))
- if match is None:
- raise ValueError('invalid wheel name: %r' % filename)
- self.filename = filename
- for k, v in match.groupdict().items():
- setattr(self, k, v)
-
- def tags(self):
- '''List tags (py_version, abi, platform) supported by this wheel.'''
- return itertools.product(
- self.py_version.split('.'),
- self.abi.split('.'),
- self.platform.split('.'),
- )
-
- def is_compatible(self):
-        '''Is the wheel compatible with the current platform?'''
- supported_tags = set(
- (t.interpreter, t.abi, t.platform) for t in sys_tags())
- return next((True for t in self.tags() if t in supported_tags), False)
-
- def egg_name(self):
- return pkg_resources.Distribution(
- project_name=self.project_name, version=self.version,
- platform=(None if self.platform == 'any' else get_platform()),
- ).egg_name() + '.egg'
-
- def get_dist_info(self, zf):
- # find the correct name of the .dist-info dir in the wheel file
- for member in zf.namelist():
- dirname = posixpath.dirname(member)
- if (dirname.endswith('.dist-info') and
- canonicalize_name(dirname).startswith(
- canonicalize_name(self.project_name))):
- return dirname
- raise ValueError("unsupported wheel format. .dist-info not found")
-
- def install_as_egg(self, destination_eggdir):
- '''Install wheel as an egg directory.'''
- with zipfile.ZipFile(self.filename) as zf:
- self._install_as_egg(destination_eggdir, zf)
-
- def _install_as_egg(self, destination_eggdir, zf):
- dist_basename = '%s-%s' % (self.project_name, self.version)
- dist_info = self.get_dist_info(zf)
- dist_data = '%s.data' % dist_basename
- egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
-
- self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
- self._move_data_entries(destination_eggdir, dist_data)
- self._fix_namespace_packages(egg_info, destination_eggdir)
-
- @staticmethod
- def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
- def get_metadata(name):
- with zf.open(posixpath.join(dist_info, name)) as fp:
- value = fp.read().decode('utf-8')
- return email.parser.Parser().parsestr(value)
-
- wheel_metadata = get_metadata('WHEEL')
- # Check wheel format version is supported.
- wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
- wheel_v1 = (
- parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
- )
- if not wheel_v1:
- raise ValueError(
- 'unsupported wheel format version: %s' % wheel_version)
- # Extract to target directory.
- _unpack_zipfile_obj(zf, destination_eggdir)
- # Convert metadata.
- dist_info = os.path.join(destination_eggdir, dist_info)
- dist = pkg_resources.Distribution.from_location(
- destination_eggdir, dist_info,
- metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
- )
-
- # Note: Evaluate and strip markers now,
- # as it's difficult to convert back from the syntax:
- # foobar; "linux" in sys_platform and extra == 'test'
- def raw_req(req):
- req.marker = None
- return str(req)
- install_requires = list(map(raw_req, dist.requires()))
- extras_require = {
- extra: [
- req
- for req in map(raw_req, dist.requires((extra,)))
- if req not in install_requires
- ]
- for extra in dist.extras
- }
- os.rename(dist_info, egg_info)
- os.rename(
- os.path.join(egg_info, 'METADATA'),
- os.path.join(egg_info, 'PKG-INFO'),
- )
- setup_dist = setuptools.Distribution(
- attrs=dict(
- install_requires=install_requires,
- extras_require=extras_require,
- ),
- )
- with disable_info_traces():
- write_requirements(
- setup_dist.get_command_obj('egg_info'),
- None,
- os.path.join(egg_info, 'requires.txt'),
- )
-
- @staticmethod
- def _move_data_entries(destination_eggdir, dist_data):
- """Move data entries to their correct location."""
- dist_data = os.path.join(destination_eggdir, dist_data)
- dist_data_scripts = os.path.join(dist_data, 'scripts')
- if os.path.exists(dist_data_scripts):
- egg_info_scripts = os.path.join(
- destination_eggdir, 'EGG-INFO', 'scripts')
- os.mkdir(egg_info_scripts)
- for entry in os.listdir(dist_data_scripts):
- # Remove bytecode, as it's not properly handled
- # during easy_install scripts install phase.
- if entry.endswith('.pyc'):
- os.unlink(os.path.join(dist_data_scripts, entry))
- else:
- os.rename(
- os.path.join(dist_data_scripts, entry),
- os.path.join(egg_info_scripts, entry),
- )
- os.rmdir(dist_data_scripts)
- for subdir in filter(os.path.exists, (
- os.path.join(dist_data, d)
- for d in ('data', 'headers', 'purelib', 'platlib')
- )):
- unpack(subdir, destination_eggdir)
- if os.path.exists(dist_data):
- os.rmdir(dist_data)
-
- @staticmethod
- def _fix_namespace_packages(egg_info, destination_eggdir):
- namespace_packages = os.path.join(
- egg_info, 'namespace_packages.txt')
- if os.path.exists(namespace_packages):
- with open(namespace_packages) as fp:
- namespace_packages = fp.read().split()
- for mod in namespace_packages:
- mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
- mod_init = os.path.join(mod_dir, '__init__.py')
- if not os.path.exists(mod_dir):
- os.mkdir(mod_dir)
- if not os.path.exists(mod_init):
- with open(mod_init, 'w') as fp:
- fp.write(NAMESPACE_PACKAGE_INIT)
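
The `WHEEL_NAME` regular expression above splits a wheel filename into project name, version, and compatibility tags, and `tags()`/`is_compatible()` then compare those tags against the running interpreter. A small usage sketch (the regex below simply mirrors the one above so the snippet is self-contained; it is not setuptools' public API):

```python
import re

WHEEL_NAME = re.compile(
    r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
    ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
    )\.whl$""",
    re.VERBOSE).match

m = WHEEL_NAME("requests-2.31.0-py3-none-any.whl")
print(m.group("project_name"), m.group("version"))                 # requests 2.31.0
print(m.group("py_version"), m.group("abi"), m.group("platform"))  # py3 none any
# Each dotted tag component may hold multiple values, e.g. "py2.py3",
# which is why Wheel.tags() splits on '.' and takes the product.
```
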
diff --git a/spaces/BramVanroy/text-to-amr/README.md b/spaces/BramVanroy/text-to-amr/README.md
deleted file mode 100644
index cbb58b9fd48f161517fda5b04e5c413aec044477..0000000000000000000000000000000000000000
--- a/spaces/BramVanroy/text-to-amr/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Text To AMR
-emoji: 👩💻
-colorFrom: yellow
-colorTo: gray
-sdk: docker
-app_port: 8501
-app_file: app.py
-pinned: true
-license: gpl-3.0
-tags:
- - natural language processing
- - semantic parsing
- - abstract meaning representation
- - amr
----
diff --git a/spaces/C6AI/HDRL/README.md b/spaces/C6AI/HDRL/README.md
deleted file mode 100644
index 6412f3386f0294d7fc0c0696b331c672c0379cc3..0000000000000000000000000000000000000000
--- a/spaces/C6AI/HDRL/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Livebook
-emoji: 📓
-colorFrom: pink
-colorTo: purple
-sdk: docker
-fullWidth: true
-duplicated_from: livebook-dev/livebook
-license: mit
----
-
-You can install and run [Livebook](https://livebook.dev/) inside a Hugging Face Space. Here's [a tutorial](https://huggingface.co/docs/hub/spaces-sdks-docker-livebook) on how to do that.
\ No newline at end of file
diff --git a/spaces/CCaniggia/GPT/Dockerfile b/spaces/CCaniggia/GPT/Dockerfile
deleted file mode 100644
index ad25254725cfd7305edf205272a35c9f781d1081..0000000000000000000000000000000000000000
--- a/spaces/CCaniggia/GPT/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM golang:alpine AS builder
-RUN apk --no-cache add git
-RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-WORKDIR /workspace/app
-RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-FROM alpine
-WORKDIR /workspace/app
-COPY --from=builder /workspace/app/go-proxy-bingai .
-ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtx5rG6bE3fZ4iO"
-EXPOSE 8080
-CMD ["/workspace/app/go-proxy-bingai"]
\ No newline at end of file
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/visualizer.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/visualizer.py
deleted file mode 100644
index a8aa4f4682d1db3195f5104da6686258cfa6fd3d..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/visualizer.py
+++ /dev/null
@@ -1,1133 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import colorsys
-import logging
-import math
-import numpy as np
-from enum import Enum, unique
-import cv2
-import matplotlib as mpl
-import matplotlib.colors as mplc
-import matplotlib.figure as mplfigure
-import pycocotools.mask as mask_util
-import torch
-from matplotlib.backends.backend_agg import FigureCanvasAgg
-
-from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
-
-from .colormap import random_color
-
-logger = logging.getLogger(__name__)
-
-__all__ = ["ColorMode", "VisImage", "Visualizer"]
-
-
-_SMALL_OBJECT_AREA_THRESH = 1000
-_LARGE_MASK_AREA_THRESH = 120000
-_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
-_BLACK = (0, 0, 0)
-_RED = (1.0, 0, 0)
-
-_KEYPOINT_THRESHOLD = 0.05
-
-
-@unique
-class ColorMode(Enum):
- """
- Enum of different color modes to use for instance visualizations.
-
- Attributes:
- IMAGE: Picks a random color for every instance and overlay segmentations with low opacity.
- SEGMENTATION: Let instances of the same category have similar colors
- (from metadata.thing_colors), and overlay them with
- high opacity. This provides more attention on the quality of segmentation.
- IMAGE_BW: same as IMAGE, but convert all areas without masks to gray-scale.
- Only available for drawing per-instance mask predictions.
- """
-
- IMAGE = 0
- SEGMENTATION = 1
- IMAGE_BW = 2
-
-
-class GenericMask:
- """
- Attribute:
- polygons (list[ndarray]): list[ndarray]: polygons for this mask.
-        polygons (list[ndarray]): polygons for this mask.
- mask (ndarray): a binary mask
- """
-
- def __init__(self, mask_or_polygons, height, width):
- self._mask = self._polygons = self._has_holes = None
- self.height = height
- self.width = width
-
- m = mask_or_polygons
- if isinstance(m, dict):
- # RLEs
- assert "counts" in m and "size" in m
- if isinstance(m["counts"], list): # uncompressed RLEs
- h, w = m["size"]
- assert h == height and w == width
- m = mask_util.frPyObjects(m, h, w)
- self._mask = mask_util.decode(m)[:, :]
- return
-
- if isinstance(m, list): # list[ndarray]
- self._polygons = [np.asarray(x).reshape(-1) for x in m]
- return
-
- if isinstance(m, np.ndarray): # assumed to be a binary mask
- assert m.shape[1] != 2, m.shape
- assert m.shape == (height, width), m.shape
- self._mask = m.astype("uint8")
- return
-
- raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
-
- @property
- def mask(self):
- if self._mask is None:
- self._mask = self.polygons_to_mask(self._polygons)
- return self._mask
-
- @property
- def polygons(self):
- if self._polygons is None:
- self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
- return self._polygons
-
- @property
- def has_holes(self):
- if self._has_holes is None:
- if self._mask is not None:
- self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
- else:
- self._has_holes = False # if original format is polygon, does not have holes
- return self._has_holes
-
- def mask_to_polygons(self, mask):
- # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
- # hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
- # Internal contours (holes) are placed in hierarchy-2.
- # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
-        mask = np.ascontiguousarray(mask)  # some versions of cv2 do not support non-contiguous arrays
- res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
- hierarchy = res[-1]
- if hierarchy is None: # empty mask
- return [], False
- has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
- res = res[-2]
- res = [x.flatten() for x in res]
- res = [x for x in res if len(x) >= 6]
- return res, has_holes
-
- def polygons_to_mask(self, polygons):
- rle = mask_util.frPyObjects(polygons, self.height, self.width)
- rle = mask_util.merge(rle)
- return mask_util.decode(rle)[:, :]
-
- def area(self):
- return self.mask.sum()
-
- def bbox(self):
- p = mask_util.frPyObjects(self.polygons, self.height, self.width)
- p = mask_util.merge(p)
- bbox = mask_util.toBbox(p)
- bbox[2] += bbox[0]
- bbox[3] += bbox[1]
- return bbox
-
-
-class _PanopticPrediction:
- def __init__(self, panoptic_seg, segments_info):
- self._seg = panoptic_seg
-
- self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
- segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
- areas = areas.numpy()
- sorted_idxs = np.argsort(-areas)
- self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
- self._seg_ids = self._seg_ids.tolist()
- for sid, area in zip(self._seg_ids, self._seg_areas):
- if sid in self._sinfo:
- self._sinfo[sid]["area"] = float(area)
-
- def non_empty_mask(self):
- """
- Returns:
- (H, W) array, a mask for all pixels that have a prediction
- """
- empty_ids = []
- for id in self._seg_ids:
- if id not in self._sinfo:
- empty_ids.append(id)
- if len(empty_ids) == 0:
- return np.zeros(self._seg.shape, dtype=np.uint8)
- assert (
- len(empty_ids) == 1
-        ), ">1 ids correspond to no labels. This is currently not supported"
- return (self._seg != empty_ids[0]).numpy().astype(np.bool)
-
- def semantic_masks(self):
- for sid in self._seg_ids:
- sinfo = self._sinfo.get(sid)
- if sinfo is None or sinfo["isthing"]:
- # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
- continue
- yield (self._seg == sid).numpy().astype(np.bool), sinfo
-
- def instance_masks(self):
- for sid in self._seg_ids:
- sinfo = self._sinfo.get(sid)
- if sinfo is None or not sinfo["isthing"]:
- continue
- mask = (self._seg == sid).numpy().astype(np.bool)
- if mask.sum() > 0:
- yield mask, sinfo
-
-
-def _create_text_labels(classes, scores, class_names):
- """
- Args:
- classes (list[int] or None):
- scores (list[float] or None):
- class_names (list[str] or None):
-
- Returns:
- list[str] or None
- """
- labels = None
- if classes is not None and class_names is not None and len(class_names) > 1:
- labels = [class_names[i] for i in classes]
- if scores is not None:
- if labels is None:
- labels = ["{:.0f}%".format(s * 100) for s in scores]
- else:
- labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
- return labels
-
-
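
`_create_text_labels` above is the small helper that turns class indices and scores into the strings drawn next to each box or mask. A quick illustrative check (assuming detectron2 is importable; the session below is a sketch, not part of the library's tests):

```python
from detectron2.utils.visualizer import _create_text_labels

# Class indices are mapped through class_names; scores become integer percentages.
print(_create_text_labels([0, 1], [0.9, 0.75], ["cat", "dog"]))
# ['cat 90%', 'dog 75%']

# Without classes, only the percentages remain.
print(_create_text_labels(None, [0.9, 0.75], ["cat", "dog"]))
# ['90%', '75%']
```
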
-class VisImage:
- def __init__(self, img, scale=1.0):
- """
- Args:
- img (ndarray): an RGB image of shape (H, W, 3).
- scale (float): scale the input image
- """
- self.img = img
- self.scale = scale
- self.width, self.height = img.shape[1], img.shape[0]
- self._setup_figure(img)
-
- def _setup_figure(self, img):
- """
- Args:
- Same as in :meth:`__init__()`.
-
- Returns:
- fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
- ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
- """
- fig = mplfigure.Figure(frameon=False)
- self.dpi = fig.get_dpi()
-        # add a small 1e-2 to avoid precision loss due to matplotlib's truncation
- # (https://github.com/matplotlib/matplotlib/issues/15363)
- fig.set_size_inches(
- (self.width * self.scale + 1e-2) / self.dpi,
- (self.height * self.scale + 1e-2) / self.dpi,
- )
- self.canvas = FigureCanvasAgg(fig)
- # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
- ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
- ax.axis("off")
- ax.set_xlim(0.0, self.width)
- ax.set_ylim(self.height)
-
- self.fig = fig
- self.ax = ax
-
- def save(self, filepath):
- """
- Args:
- filepath (str): a string that contains the absolute path, including the file name, where
- the visualized image will be saved.
- """
- if filepath.lower().endswith(".jpg") or filepath.lower().endswith(".png"):
- # faster than matplotlib's imshow
- cv2.imwrite(filepath, self.get_image()[:, :, ::-1])
- else:
- # support general formats (e.g. pdf)
- self.ax.imshow(self.img, interpolation="nearest")
- self.fig.savefig(filepath)
-
- def get_image(self):
- """
- Returns:
- ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type.
- The shape is scaled w.r.t the input image using the given `scale` argument.
- """
- canvas = self.canvas
- s, (width, height) = canvas.print_to_buffer()
- if (self.width, self.height) != (width, height):
- img = cv2.resize(self.img, (width, height))
- else:
- img = self.img
-
- # buf = io.BytesIO() # works for cairo backend
- # canvas.print_rgba(buf)
- # width, height = self.width, self.height
- # s = buf.getvalue()
-
- buffer = np.frombuffer(s, dtype="uint8")
-
- # imshow is slow. blend manually (still quite slow)
- img_rgba = buffer.reshape(height, width, 4)
- rgb, alpha = np.split(img_rgba, [3], axis=2)
-
- try:
- import numexpr as ne # fuse them with numexpr
-
- visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)")
- except ImportError:
- alpha = alpha.astype("float32") / 255.0
- visualized_image = img * (1 - alpha) + rgb * alpha
-
- visualized_image = visualized_image.astype("uint8")
-
- return visualized_image
-
-
-class Visualizer:
- def __init__(self, img_rgb, metadata, scale=1.0, instance_mode=ColorMode.IMAGE):
- """
- Args:
- img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
- the height and width of the image respectively. C is the number of
- color channels. The image is required to be in RGB format since that
- is a requirement of the Matplotlib library. The image is also expected
- to be in the range [0, 255].
- metadata (MetadataCatalog): image metadata.
- """
- self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
- self.metadata = metadata
- self.output = VisImage(self.img, scale=scale)
- self.cpu_device = torch.device("cpu")
-
-        # very small text is useless, therefore clamp the font size to a minimum
- self._default_font_size = max(
- np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
- )
- self._instance_mode = instance_mode
-
- def draw_instance_predictions(self, predictions):
- """
- Draw instance-level prediction results on an image.
-
- Args:
- predictions (Instances): the output of an instance detection/segmentation
- model. Following fields will be used to draw:
- "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
- scores = predictions.scores if predictions.has("scores") else None
- classes = predictions.pred_classes if predictions.has("pred_classes") else None
- labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
- keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
-
- if predictions.has("pred_masks"):
- masks = np.asarray(predictions.pred_masks)
- masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
- else:
- masks = None
-
- if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
- colors = [
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
- ]
- alpha = 0.8
- else:
- colors = None
- alpha = 0.5
-
- if self._instance_mode == ColorMode.IMAGE_BW:
- assert predictions.has("pred_masks"), "ColorMode.IMAGE_BW requires segmentations"
- self.output.img = self._create_grayscale_image(
- (predictions.pred_masks.any(dim=0) > 0).numpy()
- )
- alpha = 0.3
-
- self.overlay_instances(
- masks=masks,
- boxes=boxes,
- labels=labels,
- keypoints=keypoints,
- assigned_colors=colors,
- alpha=alpha,
- )
- return self.output
-
- def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8):
- """
- Draw semantic segmentation predictions/labels.
-
- Args:
- sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
- area_threshold (int): segments with less than `area_threshold` are not drawn.
- alpha (float): the larger it is, the more opaque the segmentations are.
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- if isinstance(sem_seg, torch.Tensor):
- sem_seg = sem_seg.numpy()
- labels, areas = np.unique(sem_seg, return_counts=True)
- sorted_idxs = np.argsort(-areas).tolist()
- labels = labels[sorted_idxs]
- for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
- try:
- mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
- except (AttributeError, IndexError):
- mask_color = None
-
- binary_mask = (sem_seg == label).astype(np.uint8)
- text = self.metadata.stuff_classes[label]
- self.draw_binary_mask(
- binary_mask,
- color=mask_color,
- edge_color=_OFF_WHITE,
- text=text,
- alpha=alpha,
- area_threshold=area_threshold,
- )
- return self.output
-
- def draw_panoptic_seg_predictions(
- self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7
- ):
- """
- Draw panoptic prediction results on an image.
-
- Args:
- panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
- segment.
- segments_info (list[dict]): Describe each segment in `panoptic_seg`.
- Each dict contains keys "id", "category_id", "isthing".
- area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- pred = _PanopticPrediction(panoptic_seg, segments_info)
-
- if self._instance_mode == ColorMode.IMAGE_BW:
- self.output.img = self._create_grayscale_image(pred.non_empty_mask())
-
- # draw mask for all semantic segments first i.e. "stuff"
- for mask, sinfo in pred.semantic_masks():
- category_idx = sinfo["category_id"]
- try:
- mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
- except AttributeError:
- mask_color = None
-
- text = self.metadata.stuff_classes[category_idx]
- self.draw_binary_mask(
- mask,
- color=mask_color,
- edge_color=_OFF_WHITE,
- text=text,
- alpha=alpha,
- area_threshold=area_threshold,
- )
-
- # draw mask for all instances second
- all_instances = list(pred.instance_masks())
- if len(all_instances) == 0:
- return self.output
- masks, sinfo = list(zip(*all_instances))
- category_ids = [x["category_id"] for x in sinfo]
-
- try:
- scores = [x["score"] for x in sinfo]
- except KeyError:
- scores = None
- labels = _create_text_labels(category_ids, scores, self.metadata.thing_classes)
-
- try:
- colors = [random_color(rgb=True, maximum=1) for k in category_ids]
- except AttributeError:
- colors = None
- self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
-
- return self.output
-
- def draw_dataset_dict(self, dic):
- """
-        Draw annotations/segmentations in Detectron2 Dataset format.
-
- Args:
- dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- annos = dic.get("annotations", None)
- if annos:
- if "segmentation" in annos[0]:
- masks = [x["segmentation"] for x in annos]
- else:
- masks = None
- if "keypoints" in annos[0]:
- keypts = [x["keypoints"] for x in annos]
- keypts = np.array(keypts).reshape(len(annos), -1, 3)
- else:
- keypts = None
-
- boxes = [BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) for x in annos]
-
- labels = [x["category_id"] for x in annos]
- colors = None
- if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
- colors = [
- self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in labels
- ]
- names = self.metadata.get("thing_classes", None)
- if names:
- labels = [names[i] for i in labels]
- labels = [
- "{}".format(i) + ("|crowd" if a.get("iscrowd", 0) else "")
- for i, a in zip(labels, annos)
- ]
- self.overlay_instances(
- labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
- )
-
- sem_seg = dic.get("sem_seg", None)
- if sem_seg is None and "sem_seg_file_name" in dic:
- sem_seg = cv2.imread(dic["sem_seg_file_name"], cv2.IMREAD_GRAYSCALE)
- if sem_seg is not None:
- self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
- return self.output
-
- def overlay_instances(
- self,
- *,
- boxes=None,
- labels=None,
- masks=None,
- keypoints=None,
- assigned_colors=None,
- alpha=0.5
- ):
- """
- Args:
- boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
- or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
- or a :class:`RotatedBoxes`,
- or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
- for the N objects in a single image,
- labels (list[str]): the text to be displayed for each instance.
- masks (masks-like object): Supported types are:
-
- * `structures.masks.PolygonMasks`, `structures.masks.BitMasks`.
- * list[list[ndarray]]: contains the segmentation masks for all objects in one image.
- The first level of the list corresponds to individual instances. The second
- level to all the polygon that compose the instance, and the third level
- to the polygon coordinates. The third level should have the format of
- [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
- * list[ndarray]: each ndarray is a binary mask of shape (H, W).
- * list[dict]: each dict is a COCO-style RLE.
- keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
- where the N is the number of instances and K is the number of keypoints.
- The last dimension corresponds to (x, y, visibility or score).
- assigned_colors (list[matplotlib.colors]): a list of colors, where each color
- corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
- for full list of formats that the colors are accepted in.
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- num_instances = None
- if boxes is not None:
- boxes = self._convert_boxes(boxes)
- num_instances = len(boxes)
- if masks is not None:
- masks = self._convert_masks(masks)
- if num_instances:
- assert len(masks) == num_instances
- else:
- num_instances = len(masks)
- if keypoints is not None:
- if num_instances:
- assert len(keypoints) == num_instances
- else:
- num_instances = len(keypoints)
- keypoints = self._convert_keypoints(keypoints)
- if labels is not None:
- assert len(labels) == num_instances
- if assigned_colors is None:
- assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
- if num_instances == 0:
- return self.output
- if boxes is not None and boxes.shape[1] == 5:
- return self.overlay_rotated_instances(
- boxes=boxes, labels=labels, assigned_colors=assigned_colors
- )
-
- # Display in largest to smallest order to reduce occlusion.
- areas = None
- if boxes is not None:
- areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
- elif masks is not None:
- areas = np.asarray([x.area() for x in masks])
-
- if areas is not None:
- sorted_idxs = np.argsort(-areas).tolist()
- # Re-order overlapped instances in descending order.
- boxes = boxes[sorted_idxs] if boxes is not None else None
- labels = [labels[k] for k in sorted_idxs] if labels is not None else None
- masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
- assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
- keypoints = keypoints[sorted_idxs] if keypoints is not None else None
-
- for i in range(num_instances):
- color = assigned_colors[i]
- if boxes is not None:
- self.draw_box(boxes[i], edge_color=color)
-
- if masks is not None:
- for segment in masks[i].polygons:
- self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
-
- if labels is not None:
- # first get a box
- if boxes is not None:
- x0, y0, x1, y1 = boxes[i]
- text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
- horiz_align = "left"
- elif masks is not None:
- x0, y0, x1, y1 = masks[i].bbox()
-
- # draw text in the center (defined by median) when box is not drawn
- # median is less sensitive to outliers.
- text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
- horiz_align = "center"
- else:
- continue # drawing the box confidence for keypoints isn't very useful.
- # for small objects, draw text at the side to avoid occlusion
- instance_area = (y1 - y0) * (x1 - x0)
- if (
- instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
- or y1 - y0 < 40 * self.output.scale
- ):
- if y1 >= self.output.height - 5:
- text_pos = (x1, y0)
- else:
- text_pos = (x0, y1)
-
- height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
- font_size = (
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
- * 0.5
- * self._default_font_size
- )
- self.draw_text(
- labels[i],
- text_pos,
- color=lighter_color,
- horizontal_alignment=horiz_align,
- font_size=font_size,
- )
-
- # draw keypoints
- if keypoints is not None:
- for keypoints_per_instance in keypoints:
- self.draw_and_connect_keypoints(keypoints_per_instance)
-
- return self.output
-
- def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
- """
- Args:
- boxes (ndarray): an Nx5 numpy array of
- (x_center, y_center, width, height, angle_degrees) format
- for the N objects in a single image.
- labels (list[str]): the text to be displayed for each instance.
- assigned_colors (list[matplotlib.colors]): a list of colors, where each color
- corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
- for full list of formats that the colors are accepted in.
-
- Returns:
- output (VisImage): image object with visualizations.
- """
-
- num_instances = len(boxes)
-
- if assigned_colors is None:
- assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
- if num_instances == 0:
- return self.output
-
- # Display in largest to smallest order to reduce occlusion.
- if boxes is not None:
- areas = boxes[:, 2] * boxes[:, 3]
-
- sorted_idxs = np.argsort(-areas).tolist()
- # Re-order overlapped instances in descending order.
- boxes = boxes[sorted_idxs]
- labels = [labels[k] for k in sorted_idxs] if labels is not None else None
- colors = [assigned_colors[idx] for idx in sorted_idxs]
-
- for i in range(num_instances):
- self.draw_rotated_box_with_label(
- boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
- )
-
- return self.output
-
- def draw_and_connect_keypoints(self, keypoints):
- """
- Draws keypoints of an instance and follows the rules for keypoint connections
- to draw lines between appropriate keypoints. This follows color heuristics for
- line color.
-
- Args:
- keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
- and the last dimension corresponds to (x, y, probability).
-
- Returns:
- output (VisImage): image object with visualizations.
- """
- visible = {}
- keypoint_names = self.metadata.get("keypoint_names")
- for idx, keypoint in enumerate(keypoints):
- # draw keypoint
- x, y, prob = keypoint
- if prob > _KEYPOINT_THRESHOLD:
- self.draw_circle((x, y), color=_RED)
- if keypoint_names:
- keypoint_name = keypoint_names[idx]
- visible[keypoint_name] = (x, y)
-
- if self.metadata.get("keypoint_connection_rules"):
- for kp0, kp1, color in self.metadata.keypoint_connection_rules:
- if kp0 in visible and kp1 in visible:
- x0, y0 = visible[kp0]
- x1, y1 = visible[kp1]
- color = tuple(x / 255.0 for x in color)
- self.draw_line([x0, x1], [y0, y1], color=color)
-
- # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
- # Note that this strategy is specific to person keypoints.
- # For other keypoints, it should just do nothing
- try:
- ls_x, ls_y = visible["left_shoulder"]
- rs_x, rs_y = visible["right_shoulder"]
- mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
- except KeyError:
- pass
- else:
- # draw line from nose to mid-shoulder
- nose_x, nose_y = visible.get("nose", (None, None))
- if nose_x is not None:
- self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
-
- try:
- # draw line from mid-shoulder to mid-hip
- lh_x, lh_y = visible["left_hip"]
- rh_x, rh_y = visible["right_hip"]
- except KeyError:
- pass
- else:
- mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
- self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
- return self.output
-
- """
- Primitive drawing functions:
- """
-
- def draw_text(
- self,
- text,
- position,
- *,
- font_size=None,
- color="g",
- horizontal_alignment="center",
- rotation=0
- ):
- """
- Args:
- text (str): class label
- position (tuple): a tuple of the x and y coordinates to place text on image.
- font_size (int, optional): font size of the text. If not provided, a font size
- proportional to the image width is calculated and used.
- color: color of the text. Refer to `matplotlib.colors` for full list
- of formats that are accepted.
- horizontal_alignment (str): see `matplotlib.text.Text`
- rotation: rotation angle in degrees CCW
-
- Returns:
- output (VisImage): image object with text drawn.
- """
- if not font_size:
- font_size = self._default_font_size
-
- # since the text background is dark, we don't want the text to be dark
- color = np.maximum(list(mplc.to_rgb(color)), 0.2)
- color[np.argmax(color)] = max(0.8, np.max(color))
-
- x, y = position
- self.output.ax.text(
- x,
- y,
- text,
- size=font_size * self.output.scale,
- family="sans-serif",
- bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
- verticalalignment="top",
- horizontalalignment=horizontal_alignment,
- color=color,
- zorder=10,
- rotation=rotation,
- )
- return self.output
-
- def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
- """
- Args:
- box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0
- are the coordinates of the box's top left corner. x1 and y1 are the
- coordinates of the box's bottom right corner.
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
- edge_color: color of the outline of the box. Refer to `matplotlib.colors`
- for full list of formats that are accepted.
- line_style (string): the string to use to create the outline of the boxes.
-
- Returns:
- output (VisImage): image object with box drawn.
- """
- x0, y0, x1, y1 = box_coord
- width = x1 - x0
- height = y1 - y0
-
- linewidth = max(self._default_font_size / 4, 1)
-
- self.output.ax.add_patch(
- mpl.patches.Rectangle(
- (x0, y0),
- width,
- height,
- fill=False,
- edgecolor=edge_color,
- linewidth=linewidth * self.output.scale,
- alpha=alpha,
- linestyle=line_style,
- )
- )
- return self.output
-
- def draw_rotated_box_with_label(
- self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
- ):
- """
- Args:
- rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
- where cnt_x and cnt_y are the center coordinates of the box.
- w and h are the width and height of the box. angle represents how
- many degrees the box is rotated CCW with regard to the 0-degree box.
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
- edge_color: color of the outline of the box. Refer to `matplotlib.colors`
- for full list of formats that are accepted.
- line_style (string): the string to use to create the outline of the boxes.
- label (string): label for rotated box. It will not be rendered when set to None.
-
- Returns:
- output (VisImage): image object with box drawn.
- """
- cnt_x, cnt_y, w, h, angle = rotated_box
- area = w * h
- # use thinner lines when the box is small
- linewidth = self._default_font_size / (
- 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
- )
-
- theta = angle * math.pi / 180.0
- c = math.cos(theta)
- s = math.sin(theta)
- rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
- # x: left->right ; y: top->down
- rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
- for k in range(4):
- j = (k + 1) % 4
- self.draw_line(
- [rotated_rect[k][0], rotated_rect[j][0]],
- [rotated_rect[k][1], rotated_rect[j][1]],
- color=edge_color,
- linestyle="--" if k == 1 else line_style,
- linewidth=linewidth,
- )
-
- if label is not None:
- text_pos = rotated_rect[1] # topleft corner
-
- height_ratio = h / np.sqrt(self.output.height * self.output.width)
- label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
- font_size = (
- np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
- )
- self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
-
- return self.output
-
- def draw_circle(self, circle_coord, color, radius=3):
- """
- Args:
- circle_coord (list(int) or tuple(int)): contains the x and y coordinates
- of the center of the circle.
- color: color of the circle. Refer to `matplotlib.colors` for a full list of
- formats that are accepted.
- radius (int): radius of the circle.
-
- Returns:
- output (VisImage): image object with circle drawn.
- """
- x, y = circle_coord
- self.output.ax.add_patch(
- mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
- )
- return self.output
-
- def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
- """
- Args:
- x_data (list[int]): a list containing x values of all the points being drawn.
- Length of list should match the length of y_data.
- y_data (list[int]): a list containing y values of all the points being drawn.
- Length of list should match the length of x_data.
- color: color of the line. Refer to `matplotlib.colors` for a full list of
- formats that are accepted.
- linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
- for a full list of formats that are accepted.
- linewidth (float or None): width of the line. When it's None,
- a default value will be computed and used.
-
- Returns:
- output (VisImage): image object with line drawn.
- """
- if linewidth is None:
- linewidth = self._default_font_size / 3
- linewidth = max(linewidth, 1)
- self.output.ax.add_line(
- mpl.lines.Line2D(
- x_data,
- y_data,
- linewidth=linewidth * self.output.scale,
- color=color,
- linestyle=linestyle,
- )
- )
- return self.output
-
- def draw_binary_mask(
- self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=4096
- ):
- """
- Args:
- binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
- W is the image width. Each value in the array is either a 0 or 1 value of uint8
- type.
- color: color of the mask. Refer to `matplotlib.colors` for a full list of
- formats that are accepted. If None, will pick a random color.
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
- full list of formats that are accepted.
- text (str): if not None, the text will be drawn at the object's center of mass.
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
- area_threshold (float): a connected component smaller than this area will not be shown.
-
- Returns:
- output (VisImage): image object with mask drawn.
- """
- if color is None:
- color = random_color(rgb=True, maximum=1)
- if area_threshold is None:
- area_threshold = 4096
-
- has_valid_segment = False
- binary_mask = binary_mask.astype("uint8") # opencv needs uint8
- mask = GenericMask(binary_mask, self.output.height, self.output.width)
- shape2d = (binary_mask.shape[0], binary_mask.shape[1])
-
- if not mask.has_holes:
- # draw polygons for regular masks
- for segment in mask.polygons:
- area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
- if area < area_threshold:
- continue
- has_valid_segment = True
- segment = segment.reshape(-1, 2)
- self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
- else:
- rgba = np.zeros(shape2d + (4,), dtype="float32")
- rgba[:, :, :3] = color
- rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
- has_valid_segment = True
- self.output.ax.imshow(rgba)
-
- if text is not None and has_valid_segment:
- # TODO sometimes drawn on wrong objects. the heuristics here can improve.
- lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
- _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
- largest_component_id = np.argmax(stats[1:, -1]) + 1
-
- # draw text on the largest component, as well as other very large components.
- for cid in range(1, _num_cc):
- if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
- # median is more stable than centroid
- # center = centroids[largest_component_id]
- center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
- self.draw_text(text, center, color=lighter_color)
- return self.output
-
- def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
- """
- Args:
- segment: numpy array of shape Nx2, containing all the points in the polygon.
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
- formats that are accepted.
- edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
- full list of formats that are accepted. If not provided, a darker shade
- of the polygon color will be used instead.
- alpha (float): blending coefficient. Smaller values lead to more transparent masks.
-
- Returns:
- output (VisImage): image object with polygon drawn.
- """
- if edge_color is None:
- # make edge color darker than the polygon color
- if alpha > 0.8:
- edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
- else:
- edge_color = color
- edge_color = mplc.to_rgb(edge_color) + (1,)
-
- polygon = mpl.patches.Polygon(
- segment,
- fill=True,
- facecolor=mplc.to_rgb(color) + (alpha,),
- edgecolor=edge_color,
- linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
- )
- self.output.ax.add_patch(polygon)
- return self.output
-
- """
- Internal methods:
- """
-
- def _jitter(self, color):
- """
- Randomly modifies given color to produce a slightly different color than the color given.
-
- Args:
- color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
- picked. The values in the list are in the [0.0, 1.0] range.
-
- Returns:
- jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
- color after being jittered. The values in the list are in the [0.0, 1.0] range.
- """
- color = mplc.to_rgb(color)
- vec = np.random.rand(3)
- # better to do it in another color space
- vec = vec / np.linalg.norm(vec) * 0.5
- res = np.clip(vec + color, 0, 1)
- return tuple(res)
-
- def _create_grayscale_image(self, mask=None):
- """
- Create a grayscale version of the original image.
- The colors in masked area, if given, will be kept.
- """
- img_bw = self.img.astype("f4").mean(axis=2)
- img_bw = np.stack([img_bw] * 3, axis=2)
- if mask is not None:
- img_bw[mask] = self.img[mask]
- return img_bw
-
- def _change_color_brightness(self, color, brightness_factor):
- """
- Depending on the brightness_factor, gives a lighter or darker color, i.e. a color with
- more or less lightness than the original color.
-
- Args:
- color: color of the polygon. Refer to `matplotlib.colors` for a full list of
- formats that are accepted.
- brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
- 0 will correspond to no change, a factor in [-1.0, 0) range will result in
- a darker color and a factor in (0, 1.0] range will result in a lighter color.
-
- Returns:
- modified_color (tuple[double]): a tuple containing the RGB values of the
- modified color. Each value in the tuple is in the [0.0, 1.0] range.
- """
- assert brightness_factor >= -1.0 and brightness_factor <= 1.0
- color = mplc.to_rgb(color)
- polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
- modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
- modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
- modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
- modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
- return modified_color
-
- def _convert_boxes(self, boxes):
- """
- Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
- """
- if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
- return boxes.tensor.numpy()
- else:
- return np.asarray(boxes)
-
- def _convert_masks(self, masks_or_polygons):
- """
- Convert different format of masks or polygons to a tuple of masks and polygons.
-
- Returns:
- list[GenericMask]:
- """
-
- m = masks_or_polygons
- if isinstance(m, PolygonMasks):
- m = m.polygons
- if isinstance(m, BitMasks):
- m = m.tensor.numpy()
- if isinstance(m, torch.Tensor):
- m = m.numpy()
- ret = []
- for x in m:
- if isinstance(x, GenericMask):
- ret.append(x)
- else:
- ret.append(GenericMask(x, self.output.height, self.output.width))
- return ret
-
- def _convert_keypoints(self, keypoints):
- if isinstance(keypoints, Keypoints):
- keypoints = keypoints.tensor
- keypoints = np.asarray(keypoints)
- return keypoints
-
- def get_output(self):
- """
- Returns:
- output (VisImage): the image output containing the visualizations added
- to the image.
- """
- return self.output
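The file removed above is a vendored copy of detectron2's `Visualizer` utility. For context, a minimal usage sketch of that class, assuming detectron2 is installed and a dataset named "my_dataset_train" has been registered (both assumptions are not part of this diff):

# Hedged sketch: visualize one Detectron2 dataset dict with the Visualizer defined above.
import cv2
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.visualizer import Visualizer

dataset_dicts = DatasetCatalog.get("my_dataset_train")      # assumed registered dataset name
metadata = MetadataCatalog.get("my_dataset_train")

d = dataset_dicts[0]
img = cv2.imread(d["file_name"])                            # BGR, as read by OpenCV
viz = Visualizer(img[:, :, ::-1], metadata=metadata)        # Visualizer expects RGB
out = viz.draw_dataset_dict(d)                              # the method defined above; returns a VisImage
cv2.imwrite("vis.jpg", out.get_image()[:, :, ::-1])         # back to BGR for imwrite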
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/plain_train_net.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/plain_train_net.py
deleted file mode 100644
index e3fe1db98d097423fc42243e8ef0b505d06505ee..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tools/plain_train_net.py
+++ /dev/null
@@ -1,231 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-"""
-Detectron2 training script with a plain training loop.
-
-This script reads a given config file and runs training or evaluation.
-It is an entry point that is able to train standard models in detectron2.
-
-In order to let one script support training of many models,
-this script contains logic that is specific to these built-in models and therefore
-may not be suitable for your own project.
-For example, your research project perhaps only needs a single "evaluator".
-
-Therefore, we recommend using detectron2 as a library and taking
-this file as an example of how to use the library.
-You may want to write your own script with your datasets and other customizations.
-
-Compared to "train_net.py", this script supports fewer default features.
-It also includes fewer abstractions, and is therefore easier to extend with custom logic.
-"""
-
-import logging
-import os
-from collections import OrderedDict
-import torch
-from torch.nn.parallel import DistributedDataParallel
-
-import detectron2.utils.comm as comm
-from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
-from detectron2.config import get_cfg
-from detectron2.data import (
- MetadataCatalog,
- build_detection_test_loader,
- build_detection_train_loader,
-)
-from detectron2.engine import default_argument_parser, default_setup, launch
-from detectron2.evaluation import (
- CityscapesEvaluator,
- COCOEvaluator,
- COCOPanopticEvaluator,
- DatasetEvaluators,
- LVISEvaluator,
- PascalVOCDetectionEvaluator,
- SemSegEvaluator,
- inference_on_dataset,
- print_csv_format,
-)
-from detectron2.modeling import build_model
-from detectron2.solver import build_lr_scheduler, build_optimizer
-from detectron2.utils.events import (
- CommonMetricPrinter,
- EventStorage,
- JSONWriter,
- TensorboardXWriter,
-)
-
-logger = logging.getLogger("detectron2")
-
-
-def get_evaluator(cfg, dataset_name, output_folder=None):
- """
- Create evaluator(s) for a given dataset.
- This uses the special metadata "evaluator_type" associated with each builtin dataset.
- For your own dataset, you can simply create an evaluator manually in your
- script and do not have to worry about the hacky if-else logic here.
- """
- if output_folder is None:
- output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
- evaluator_list = []
- evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
- if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
- evaluator_list.append(
- SemSegEvaluator(
- dataset_name,
- distributed=True,
- num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,
- ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
- output_dir=output_folder,
- )
- )
- if evaluator_type in ["coco", "coco_panoptic_seg"]:
- evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))
- if evaluator_type == "coco_panoptic_seg":
- evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
- if evaluator_type == "cityscapes":
- assert (
- torch.cuda.device_count() >= comm.get_rank()
- ), "CityscapesEvaluator currently do not work with multiple machines."
- return CityscapesEvaluator(dataset_name)
- if evaluator_type == "pascal_voc":
- return PascalVOCDetectionEvaluator(dataset_name)
- if evaluator_type == "lvis":
- return LVISEvaluator(dataset_name, cfg, True, output_folder)
- if len(evaluator_list) == 0:
- raise NotImplementedError(
- "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
- )
- if len(evaluator_list) == 1:
- return evaluator_list[0]
- return DatasetEvaluators(evaluator_list)
-
-
-def do_test(cfg, model):
- results = OrderedDict()
- for dataset_name in cfg.DATASETS.TEST:
- data_loader = build_detection_test_loader(cfg, dataset_name)
- evaluator = get_evaluator(
- cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
- )
- results_i = inference_on_dataset(model, data_loader, evaluator)
- results[dataset_name] = results_i
- if comm.is_main_process():
- logger.info("Evaluation results for {} in csv format:".format(dataset_name))
- print_csv_format(results_i)
- if len(results) == 1:
- results = list(results.values())[0]
- return results
-
-
-def do_train(cfg, model, resume=False):
- model.train()
- optimizer = build_optimizer(cfg, model)
- scheduler = build_lr_scheduler(cfg, optimizer)
-
- checkpointer = DetectionCheckpointer(
- model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
- )
- start_iter = (
- checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
- )
- max_iter = cfg.SOLVER.MAX_ITER
-
- periodic_checkpointer = PeriodicCheckpointer(
- checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
- )
-
- writers = (
- [
- CommonMetricPrinter(max_iter),
- JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
- TensorboardXWriter(cfg.OUTPUT_DIR),
- ]
- if comm.is_main_process()
- else []
- )
-
- # compared to "train_net.py", we do not support accurate timing and
- # precise BN here, because they are not trivial to implement
- data_loader = build_detection_train_loader(cfg)
- logger.info("Starting training from iteration {}".format(start_iter))
- with EventStorage(start_iter) as storage:
- for data, iteration in zip(data_loader, range(start_iter, max_iter)):
- iteration = iteration + 1
- storage.step()
-
- loss_dict = model(data)
- losses = sum(loss_dict.values())
- assert torch.isfinite(losses).all(), loss_dict
-
- loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
- losses_reduced = sum(loss for loss in loss_dict_reduced.values())
- if comm.is_main_process():
- storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
-
- optimizer.zero_grad()
- losses.backward()
- optimizer.step()
- storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
- scheduler.step()
-
- if (
- cfg.TEST.EVAL_PERIOD > 0
- and iteration % cfg.TEST.EVAL_PERIOD == 0
- and iteration != max_iter
- ):
- do_test(cfg, model)
- # Compared to "train_net.py", the test results are not dumped to EventStorage
- comm.synchronize()
-
- if iteration - start_iter > 5 and (iteration % 20 == 0 or iteration == max_iter):
- for writer in writers:
- writer.write()
- periodic_checkpointer.step(iteration)
-
-
-def setup(args):
- """
- Create configs and perform basic setups.
- """
- cfg = get_cfg()
- cfg.merge_from_file(args.config_file)
- cfg.merge_from_list(args.opts)
- cfg.freeze()
- default_setup(
- cfg, args
- ) # if you don't like any of the default setup, write your own setup code
- return cfg
-
-
-def main(args):
- cfg = setup(args)
-
- model = build_model(cfg)
- logger.info("Model:\n{}".format(model))
- if args.eval_only:
- DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
- cfg.MODEL.WEIGHTS, resume=args.resume
- )
- return do_test(cfg, model)
-
- distributed = comm.get_world_size() > 1
- if distributed:
- model = DistributedDataParallel(
- model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
- )
-
- do_train(cfg, model)
- return do_test(cfg, model)
-
-
-if __name__ == "__main__":
- args = default_argument_parser().parse_args()
- print("Command Line Args:", args)
- launch(
- main,
- args.num_gpus,
- num_machines=args.num_machines,
- machine_rank=args.machine_rank,
- dist_url=args.dist_url,
- args=(args,),
- )
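The deleted script above keeps the training loop explicit: build an optimizer and scheduler, iterate the loader for a fixed number of iterations, and checkpoint periodically. A hedged, plain-PyTorch sketch of the same loop pattern (not detectron2-specific; model, loss_fn and data_loader are placeholders, and the loader is assumed to yield (inputs, targets) pairs):

# Minimal sketch of a plain training loop in bare PyTorch, mirroring the structure of do_train above.
import torch

def train_loop(model, loss_fn, data_loader, max_iter, lr=0.01, ckpt_period=1000):
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=max(1, max_iter // 3))
    model.train()
    for iteration, (inputs, targets) in zip(range(1, max_iter + 1), data_loader):
        loss = loss_fn(model(inputs), targets)
        assert torch.isfinite(loss).all(), "non-finite loss"
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()
        if iteration % ckpt_period == 0 or iteration == max_iter:
            torch.save({"model": model.state_dict(), "iteration": iteration}, "last_checkpoint.pth")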
diff --git a/spaces/CVPR/LIVE/thrust/thrust/async/sort.h b/spaces/CVPR/LIVE/thrust/thrust/async/sort.h
deleted file mode 100644
index c665c6467e372929efbb586a8ffa19b761601c39..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/async/sort.h
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Copyright 2008-2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*! \file async/sort.h
- * \brief Functions for asynchronously sorting a range.
- */
-
-#pragma once
-
-#include
-#include
-
-#if THRUST_CPP_DIALECT >= 2014
-
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-namespace thrust
-{
-
-namespace async
-{
-
-namespace unimplemented
-{
-
-template <
- typename DerivedPolicy
-, typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
->
-__host__
-event<DerivedPolicy>
-async_stable_sort(
- thrust::execution_policy<DerivedPolicy>&
-, ForwardIt, Sentinel, StrictWeakOrdering
-)
-{
- THRUST_STATIC_ASSERT_MSG(
- (thrust::detail::depend_on_instantiation<ForwardIt, false>::value)
- , "this algorithm is not implemented for the specified system"
- );
- return {};
-}
-
-} // namespace unimplemented
-
-namespace stable_sort_detail
-{
-
-using thrust::async::unimplemented::async_stable_sort;
-
-struct stable_sort_fn final
-{
- template <
- typename DerivedPolicy
- , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
- >
- __host__
- static auto call(
- thrust::detail::execution_policy_base<DerivedPolicy> const& exec
- , ForwardIt&& first, Sentinel&& last
- , StrictWeakOrdering&& comp
- )
- // ADL dispatch.
- THRUST_RETURNS(
- async_stable_sort(
- thrust::detail::derived_cast(thrust::detail::strip_const(exec))
- , THRUST_FWD(first), THRUST_FWD(last)
- , THRUST_FWD(comp)
- )
- )
-
- template <
- typename DerivedPolicy
- , typename ForwardIt, typename Sentinel
- >
- __host__
- static auto call(
- thrust::detail::execution_policy_base<DerivedPolicy> const& exec
- , ForwardIt&& first, Sentinel&& last
- )
- // ADL dispatch.
- THRUST_RETURNS(
- async_stable_sort(
- thrust::detail::derived_cast(thrust::detail::strip_const(exec))
- , THRUST_FWD(first), THRUST_FWD(last)
- , thrust::less<
- typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
- >{}
- )
- )
-
- template <typename ForwardIt, typename Sentinel, typename StrictWeakOrdering>
- __host__
- static auto call(ForwardIt&& first, Sentinel&& last, StrictWeakOrdering&& comp)
- THRUST_RETURNS(
- stable_sort_fn::call(
- thrust::detail::select_system(
- typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
- )
- , THRUST_FWD(first), THRUST_FWD(last)
- , THRUST_FWD(comp)
- )
- )
-
- template <typename ForwardIt, typename Sentinel>
- __host__
- static auto call(ForwardIt&& first, Sentinel&& last)
- THRUST_RETURNS(
- stable_sort_fn::call(
- THRUST_FWD(first), THRUST_FWD(last)
- , thrust::less<
- typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
- >{}
- )
- )
-
- template <typename... Args>
- THRUST_NODISCARD __host__
- auto operator()(Args&&... args) const
- THRUST_RETURNS(
- call(THRUST_FWD(args)...)
- )
-};
-
-} // namespace stable_sort_detail
-
-THRUST_INLINE_CONSTANT stable_sort_detail::stable_sort_fn stable_sort{};
-
-namespace fallback
-{
-
-template <
- typename DerivedPolicy
-, typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
->
-__host__
-event<DerivedPolicy>
-async_sort(
- thrust::execution_policy<DerivedPolicy>& exec
-, ForwardIt&& first, Sentinel&& last, StrictWeakOrdering&& comp
-)
-{
- return async_stable_sort(
- thrust::detail::derived_cast(exec)
- , THRUST_FWD(first), THRUST_FWD(last), THRUST_FWD(comp)
- );
-}
-
-} // namespace fallback
-
-namespace sort_detail
-{
-
-using thrust::async::fallback::async_sort;
-
-struct sort_fn final
-{
- template <
- typename DerivedPolicy
- , typename ForwardIt, typename Sentinel, typename StrictWeakOrdering
- >
- __host__
- static auto call(
- thrust::detail::execution_policy_base<DerivedPolicy> const& exec
- , ForwardIt&& first, Sentinel&& last
- , StrictWeakOrdering&& comp
- )
- // ADL dispatch.
- THRUST_RETURNS(
- async_sort(
- thrust::detail::derived_cast(thrust::detail::strip_const(exec))
- , THRUST_FWD(first), THRUST_FWD(last)
- , THRUST_FWD(comp)
- )
- )
-
- template <
- typename DerivedPolicy
- , typename ForwardIt, typename Sentinel
- >
- __host__
- static auto call3(
- thrust::detail::execution_policy_base<DerivedPolicy> const& exec
- , ForwardIt&& first, Sentinel&& last
- , thrust::true_type
- )
- THRUST_RETURNS(
- sort_fn::call(
- exec
- , THRUST_FWD(first), THRUST_FWD(last)
- , thrust::less<
- typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
- >{}
- )
- )
-
- template <typename ForwardIt, typename Sentinel, typename StrictWeakOrdering>
- __host__
- static auto call3(ForwardIt&& first, Sentinel&& last,
- StrictWeakOrdering&& comp,
- thrust::false_type)
- THRUST_RETURNS(
- sort_fn::call(
- thrust::detail::select_system(
- typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
- )
- , THRUST_FWD(first), THRUST_FWD(last)
- , THRUST_FWD(comp)
- )
- )
-
- // MSVC WAR: MSVC gets angsty and eats all available RAM when we try to detect
- // if T1 is an execution_policy by using SFINAE. Switching to a static
- // dispatch pattern to prevent this.
- template <typename T1, typename T2, typename T3>
- __host__
- static auto call(T1&& t1, T2&& t2, T3&& t3)
- THRUST_RETURNS(
- sort_fn::call3(THRUST_FWD(t1), THRUST_FWD(t2), THRUST_FWD(t3),
- thrust::is_execution_policy<remove_cvref_t<T1>>{})
- )
-
- template <typename ForwardIt, typename Sentinel>
- __host__
- static auto call(ForwardIt&& first, Sentinel&& last)
- THRUST_RETURNS(
- sort_fn::call(
- thrust::detail::select_system(
- typename iterator_system<remove_cvref_t<ForwardIt>>::type{}
- )
- , THRUST_FWD(first), THRUST_FWD(last)
- , thrust::less<
- typename iterator_traits<remove_cvref_t<ForwardIt>>::value_type
- >{}
- )
- )
-
- template <typename... Args>
- THRUST_NODISCARD __host__
- auto operator()(Args&&... args) const
- THRUST_RETURNS(
- call(THRUST_FWD(args)...)
- )
-};
-
-} // namespace sort_detail
-
-THRUST_INLINE_CONSTANT sort_detail::sort_fn sort{};
-
-} // namespace async
-
-} // end namespace thrust
-
-#endif
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/tabulate.h b/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/tabulate.h
deleted file mode 100644
index 6ae2b22a5cbd3d2705cf4b13757c050b7c6161cc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/detail/adl/tabulate.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include
-
-// the purpose of this header is to #include the tabulate.h header
-// of the sequential, host, and device systems. It should be #included in any
-// code which uses adl to dispatch tabulate
-
-#include
-
-// SCons can't see through the #defines below to figure out what this header
-// includes, so we fake it out by specifying all possible files we might end up
-// including inside an #if 0.
-#if 0
-#include
-#include
-#include
-#include
-#endif
-
-#define __THRUST_HOST_SYSTEM_TABULATE_HEADER <__THRUST_HOST_SYSTEM_ROOT/detail/tabulate.h>
-#include __THRUST_HOST_SYSTEM_TABULATE_HEADER
-#undef __THRUST_HOST_SYSTEM_TABULATE_HEADER
-
-#define __THRUST_DEVICE_SYSTEM_TABULATE_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/tabulate.h>
-#include __THRUST_DEVICE_SYSTEM_TABULATE_HEADER
-#undef __THRUST_DEVICE_SYSTEM_TABULATE_HEADER
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/extrema.h b/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/extrema.h
deleted file mode 100644
index e0dd4c042b38bafb42d683e2f4f19bab3678a4b4..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/extrema.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include
-#include
-#include
-
-namespace thrust
-{
-namespace system
-{
-namespace tbb
-{
-namespace detail
-{
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-ForwardIterator max_element(execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- BinaryPredicate comp)
-{
- // tbb prefers generic::max_element to cpp::max_element
- return thrust::system::detail::generic::max_element(exec, first, last, comp);
-} // end max_element()
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-ForwardIterator min_element(execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- BinaryPredicate comp)
-{
- // tbb prefers generic::min_element to cpp::min_element
- return thrust::system::detail::generic::min_element(exec, first, last, comp);
-} // end min_element()
-
-template <typename DerivedPolicy, typename ForwardIterator, typename BinaryPredicate>
-thrust::pair<ForwardIterator, ForwardIterator> minmax_element(execution_policy<DerivedPolicy> &exec,
- ForwardIterator first,
- ForwardIterator last,
- BinaryPredicate comp)
-{
- // tbb prefers generic::minmax_element to cpp::minmax_element
- return thrust::system::detail::generic::minmax_element(exec, first, last, comp);
-} // end minmax_element()
-
-} // end detail
-} // end tbb
-} // end system
-} // end thrust
-
-
diff --git a/spaces/CVPR/v-doc_abstractive_mac/ops.py b/spaces/CVPR/v-doc_abstractive_mac/ops.py
deleted file mode 100644
index 52c6b54c94592e1ef27b0d28ba3bc8bffcd952a8..0000000000000000000000000000000000000000
--- a/spaces/CVPR/v-doc_abstractive_mac/ops.py
+++ /dev/null
@@ -1,1067 +0,0 @@
-from __future__ import division
-import math
-import tensorflow as tf
-
-from mi_gru_cell import MiGRUCell
-from mi_lstm_cell import MiLSTMCell
-from config import config
-
-eps = 1e-20
-inf = 1e30
-
-####################################### variables ########################################
-
-'''
-Initializes a weight matrix variable given a shape and a name.
-Uses random_normal initialization if 1d, otherwise uses xavier.
-'''
-def getWeight(shape, name = ""):
- with tf.variable_scope("weights"):
- initializer = tf.contrib.layers.xavier_initializer()
- # if len(shape) == 1: # good?
- # initializer = tf.random_normal_initializer()
- W = tf.get_variable("weight" + name, shape = shape, initializer = initializer)
- return W
-
-'''
-Initializes a weight matrix variable given a shape and a name. Uses xavier
-'''
-def getKernel(shape, name = ""):
- with tf.variable_scope("kernels"):
- initializer = tf.contrib.layers.xavier_initializer()
- W = tf.get_variable("kernel" + name, shape = shape, initializer = initializer)
- return W
-
-'''
-Initializes a bias variable given a shape and a name.
-'''
-def getBias(shape, name = ""):
- with tf.variable_scope("biases"):
- initializer = tf.zeros_initializer()
- b = tf.get_variable("bias" + name, shape = shape, initializer = initializer)
- return b
-
-######################################### basics #########################################
-
-'''
-Multiplies input inp of any depth by a 2d weight matrix.
-'''
-# switch with conv 1?
-def multiply(inp, W):
- inDim = tf.shape(W)[0]
- outDim = tf.shape(W)[1]
- newDims = tf.concat([tf.shape(inp)[:-1], tf.fill((1,), outDim)], axis = 0)
-
- inp = tf.reshape(inp, (-1, inDim))
- output = tf.matmul(inp, W)
- output = tf.reshape(output, newDims)
-
- return output
-
-'''
-Concatenates x and y. Supports broadcasting.
-Optionally also concatenates the elementwise product x * y.
-'''
-def concat(x, y, dim, mul = False, extendY = False):
- if extendY:
- y = tf.expand_dims(y, axis = -2)
- # broadcasting to have the same shape
- y = tf.zeros_like(x) + y
-
- if mul:
- out = tf.concat([x, y, x * y], axis = -1)
- dim *= 3
- else:
- out = tf.concat([x, y], axis = -1)
- dim *= 2
-
- return out, dim
-
-'''
-Adds L2 regularization for weight and kernel variables.
-'''
-# add l2 in the tf way
-def L2RegularizationOp(l2 = None):
- if l2 is None:
- l2 = config.l2
- l2Loss = 0
- names = ["weight", "kernel"]
- for var in tf.trainable_variables():
- if any((name in var.name.lower()) for name in names):
- l2Loss += tf.nn.l2_loss(var)
- return l2 * l2Loss
-
-######################################### attention #########################################
-
-'''
-Transform vectors to scalar logits.
-
-Args:
- interactions: input vectors
- [batchSize, N, dim]
-
- dim: dimension of input vectors
-
- sumMod: LIN for linear transformation to scalars.
- SUM to sum up vectors entries to get scalar logit.
-
- dropout: dropout value over inputs (for linear case)
-
-Return matching scalar for each interaction.
-[batchSize, N]
-'''
-sumMod = ["LIN", "SUM"]
-def inter2logits(interactions, dim, sumMod = "LIN", dropout = 1.0, name = "", reuse = None):
- with tf.variable_scope("inter2logits" + name, reuse = reuse):
- if sumMod == "SUM":
- logits = tf.reduce_sum(interactions, axis = -1)
- else: # "LIN"
- logits = linear(interactions, dim, 1, dropout = dropout, name = "logits")
- return logits
-
-'''
-Transforms vectors to probability distribution.
-Calls inter2logits and then softmax over these.
-
-Args:
- interactions: input vectors
- [batchSize, N, dim]
-
- dim: dimension of input vectors
-
- sumMod: LIN for linear transformation to scalars.
- SUM to sum up vectors entries to get scalar logit.
-
- dropout: dropout value over inputs (for linear case)
-
-Return attention distribution over interactions.
-[batchSize, N]
-'''
-def inter2att(interactions, dim, dropout = 1.0, name = "", reuse = None):
- with tf.variable_scope("inter2att" + name, reuse = reuse):
- logits = inter2logits(interactions, dim, dropout = dropout)
- attention = tf.nn.softmax(logits)
- return attention
-
-'''
-Sums up features using attention distribution to get a weighted average over them.
-'''
-def att2Smry(attention, features):
- return tf.reduce_sum(tf.expand_dims(attention, axis = -1) * features, axis = -2)
-
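The three helpers above implement the usual attention pattern: project interactions to scalar logits, softmax them into a distribution, then take the attention-weighted sum of the features. A small NumPy sketch of the same computation with made-up shapes:

# Hedged NumPy sketch of the inter2logits -> inter2att -> att2Smry pipeline above.
import numpy as np

batch, n, dim = 2, 5, 8
interactions = np.random.randn(batch, n, dim)
w = np.random.randn(dim)                                    # "LIN" mode: one scalar logit per vector

logits = interactions @ w                                   # [batch, n], like inter2logits
logits -= logits.max(axis=-1, keepdims=True)                # for numerical stability
attention = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)   # inter2att
summary = (attention[..., None] * interactions).sum(axis=-2)              # att2Smry -> [batch, dim]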
-####################################### activations ########################################
-
-'''
-Performs a variant of ReLU based on config.relu
- PRM for PReLU
- ELU for ELU
- LKY for Leaky ReLU
- otherwise, standard ReLU
-'''
-def relu(inp):
- if config.relu == "PRM":
- with tf.variable_scope(None, default_name = "prelu"):
- alpha = tf.get_variable("alpha", shape = inp.get_shape()[-1],
- initializer = tf.constant_initializer(0.25))
- pos = tf.nn.relu(inp)
- neg = - (alpha * tf.nn.relu(-inp))
- output = pos + neg
- elif config.relu == "ELU":
- output = tf.nn.elu(inp)
- # elif config.relu == "SELU":
- # output = tf.nn.selu(inp)
- elif config.relu == "LKY":
- # output = tf.nn.leaky_relu(inp, config.reluAlpha)
- output = tf.maximum(inp, config.reluAlpha * inp)
- elif config.relu == "STD": # STD
- output = tf.nn.relu(inp)
-
- return output
-
-activations = {
- "NON": tf.identity, # lambda inp: inp
- "TANH": tf.tanh,
- "SIGMOID": tf.sigmoid,
- "RELU": relu,
- "ELU": tf.nn.elu
-}
-
-# Sample from Gumbel(0, 1)
-def sampleGumbel(shape):
- U = tf.random_uniform(shape, minval = 0, maxval = 1)
- return -tf.log(-tf.log(U + eps) + eps)
-
-# Draw a clevr_sample from the Gumbel-Softmax distribution
-def gumbelSoftmaxSample(logits, temperature):
- y = logits + sampleGumbel(tf.shape(logits))
- return tf.nn.softmax(y / temperature)
-
-def gumbelSoftmax(logits, temperature, train): # hard = False
- # Sample from the Gumbel-Softmax distribution and optionally discretize.
- # Args:
- # logits: [batch_size, n_class] unnormalized log-probs
- # temperature: non-negative scalar
- # hard: if True, take argmax, but differentiate w.r.t. soft clevr_sample y
- # Returns:
- # [batch_size, n_class] clevr_sample from the Gumbel-Softmax distribution.
- # If hard=True, then the returned clevr_sample will be one-hot, otherwise it will
- # be a probability distribution that sums to 1 across classes
-
- y = gumbelSoftmaxSample(logits, temperature)
-
- # k = tf.shape(logits)[-1]
- # yHard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
- yHard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims = True)), y.dtype)
- yNew = tf.stop_gradient(yHard - y) + y
-
- if config.gumbelSoftmaxBoth:
- return y
- if config.gumbelArgmaxBoth:
- return yNew
- ret = tf.cond(train, lambda: y, lambda: yNew)
-
- return ret
-
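gumbelSoftmax above follows the Gumbel-Softmax / straight-through trick: add Gumbel(0, 1) noise to the logits, apply a temperature-scaled softmax, and optionally discretize with an argmax while keeping the soft sample in the backward pass. A hedged NumPy sketch of the forward computation only (the straight-through gradient requires a framework and is omitted):

# NumPy sketch of Gumbel-Softmax sampling (forward pass only); temperature is an assumed value.
import numpy as np

def gumbel_softmax_sample(logits, temperature=0.5, eps=1e-20, hard=False):
    u = np.random.uniform(size=np.shape(logits))
    gumbel = -np.log(-np.log(u + eps) + eps)               # Gumbel(0, 1) noise, as in sampleGumbel
    y = np.exp((logits + gumbel) / temperature)
    y = y / y.sum(axis=-1, keepdims=True)                  # soft, differentiable sample
    if hard:                                               # discretize, like the yHard branch above
        y = (y == y.max(axis=-1, keepdims=True)).astype(y.dtype)
    return y

probs = gumbel_softmax_sample(np.log([0.1, 0.2, 0.7]), temperature=0.5)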
-def softmaxDiscrete(logits, temperature, train):
- if config.gumbelSoftmax:
- return gumbelSoftmax(logits, temperature = temperature, train = train)
- else:
- return tf.nn.softmax(logits)
-
-def parametricDropout(name, train):
- var = tf.get_variable("varDp" + name, shape = (), initializer = tf.constant_initializer(2),
- dtype = tf.float32)
- dropout = tf.cond(train, lambda: tf.sigmoid(var), lambda: 1.0)
- return dropout
-
-###################################### sequence helpers ######################################
-
-'''
-Casts exponential mask over a sequence with sequence length.
-Used to prepare logits before softmax.
-'''
-def expMask(seq, seqLength):
- maxLength = tf.shape(seq)[-1]
- mask = (1 - tf.cast(tf.sequence_mask(seqLength, maxLength), tf.float32)) * (-inf)
- masked = seq + mask
- return masked
-
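expMask adds a large negative constant to every position past the true sequence length, so a subsequent softmax gives those positions essentially zero probability. A small NumPy illustration with an assumed toy example:

# NumPy illustration of the exponential-mask trick used by expMask above.
import numpy as np

inf = 1e30
logits = np.array([[1.0, 2.0, 3.0, 4.0]])
seq_length = np.array([2])                                  # only the first two positions are valid
mask = (np.arange(logits.shape[-1]) >= seq_length[:, None]) * (-inf)
masked = logits + mask
probs = np.exp(masked - masked.max(axis=-1, keepdims=True))
probs /= probs.sum(axis=-1, keepdims=True)                  # approximately [0.27, 0.73, 0.0, 0.0]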
-'''
-Computes seq2seq loss between logits and target sequences, with given lengths.
-'''
-def seq2SeqLoss(logits, targets, lengths):
- mask = tf.sequence_mask(lengths, maxlen = tf.shape(targets)[1])
- loss = tf.contrib.seq2seq.sequence_loss(logits, targets, tf.to_float(mask))
- return loss
-
-'''
-Computes seq2seq accuracy between predictions and target sequences, with given lengths.
- acc1: accuracy per symbol
- acc2: accuracy per sequence
-'''
-def seq2seqAcc(preds, targets, lengths):
- mask = tf.sequence_mask(lengths, maxlen = tf.shape(targets)[1])
- corrects = tf.logical_and(tf.equal(preds, targets), mask)
- numCorrects = tf.reduce_sum(tf.to_int32(corrects), axis = 1)
-
- acc1 = tf.to_float(numCorrects) / (tf.to_float(lengths) + eps) # add small eps instead?
- acc1 = tf.reduce_mean(acc1)
-
- acc2 = tf.to_float(tf.equal(numCorrects, lengths))
- acc2 = tf.reduce_mean(acc2)
-
- return acc1, acc2
-
-########################################### linear ###########################################
-
-'''
-linear transformation.
-
-Args:
- inp: input to transform
- inDim: input dimension
- outDim: output dimension
- dropout: dropout over input
- batchNorm: if not None, applies batch normalization to inputs
- addBias: True to add bias
- bias: initial bias value
- act: if not None, activation to use after linear transformation
- actLayer: if True and act is not None, applies another linear transformation on top of previous
- actDropout: dropout to apply in the optional second linear transformation
- retVars: if True, return parameters (weight and bias)
-
-Returns linear transformation result.
-'''
-# batchNorm = {"decay": float, "train": Tensor}
-# actLayer: if activation is not non, stack another linear layer
-# maybe change naming scheme such that if name = "" than use it as default_name (-->unique?)
-def linear(inp, inDim, outDim, dropout = 1.0,
- batchNorm = None, addBias = True, bias = 0.0,
- act = "NON", actLayer = True, actDropout = 1.0,
- retVars = False, name = "", reuse = None):
-
- with tf.variable_scope("linearLayer" + name, reuse = reuse):
- W = getWeight((inDim, outDim) if outDim > 1 else (inDim, ))
- b = getBias((outDim, ) if outDim > 1 else ()) + bias
-
- if batchNorm is not None:
- inp = tf.contrib.layers.batch_norm(inp, decay = batchNorm["decay"],
- center = True, scale = True, is_training = batchNorm["train"], updates_collections = None)
- # tf.layers.batch_normalization, axis -1 ?
-
- inp = tf.nn.dropout(inp, dropout)
-
- if outDim > 1:
- output = multiply(inp, W)
- else:
- output = tf.reduce_sum(inp * W, axis = -1)
-
- if addBias:
- output += b
-
- output = activations[act](output)
-
- # good?
- if act != "NON" and actLayer:
- output = linear(output, outDim, outDim, dropout = actDropout, batchNorm = batchNorm,
- addBias = addBias, act = "NON", actLayer = False,
- name = name + "_2", reuse = reuse)
-
- if retVars:
- return (output, (W, b))
-
- return output
-
-'''
-Computes Multi-layer feed-forward network.
-
-Args:
- features: input features
- dims: list with dimensions of network.
- First dimension is of the inputs, final is of the outputs.
- batchNorm: if not None, applies batchNorm
- dropout: dropout value to apply for each layer
- act: activation to apply between layers.
- NON, TANH, SIGMOID, RELU, ELU
-'''
-# no activation after last layer
-# batchNorm = {"decay": float, "train": Tensor}
-def FCLayer(features, dims, batchNorm = None, dropout = 1.0, act = "RELU"):
- layersNum = len(dims) - 1
-
- for i in range(layersNum):
- features = linear(features, dims[i], dims[i+1], name = "fc_%d" % i,
- batchNorm = batchNorm, dropout = dropout)
- # not the last layer
- if i < layersNum - 1:
- features = activations[act](features)
-
- return features
-
-###################################### cnns ######################################
-
-'''
-Computes convolution.
-
-Args:
- inp: input features
- inDim: input dimension
- outDim: output dimension
- batchNorm: if not None, applies batchNorm on inputs
- dropout: dropout value to apply on inputs
- addBias: True to add bias
- kernelSize: kernel size
- stride: stride size
- act: activation to apply on outputs
- NON, TANH, SIGMOID, RELU, ELU
-'''
-# batchNorm = {"decay": float, "train": Tensor, "center": bool, "scale": bool}
-# collections.namedtuple("batchNorm", ("decay", "train"))
-def cnn(inp, inDim, outDim, batchNorm = None, dropout = 1.0, addBias = True,
- kernelSize = None, stride = 1, act = "NON", name = "", reuse = None):
-
- with tf.variable_scope("cnnLayer" + name, reuse = reuse):
-
- if kernelSize is None:
- kernelSize = config.stemKernelSize
- kernelH = kernelW = kernelSize
-
- kernel = getKernel((kernelH, kernelW, inDim, outDim))
- b = getBias((outDim, ))
-
- if batchNorm is not None:
- inp = tf.contrib.layers.batch_norm(inp, decay = batchNorm["decay"], center = batchNorm["center"],
- scale = batchNorm["scale"], is_training = batchNorm["train"], updates_collections = None)
-
- inp = tf.nn.dropout(inp, dropout)
-
- output = tf.nn.conv2d(inp, filter = kernel, strides = [1, stride, stride, 1], padding = "SAME")
-
- if addBias:
- output += b
-
- output = activations[act](output)
-
- return output
-
-'''
-Computes Multi-layer convolutional network.
-
-Args:
- features: input features
- dims: list with dimensions of network.
- First dimension is of the inputs. Final is of the outputs.
- batchNorm: if not None, applies batchNorm
- dropout: dropout value to apply for each layer
- kernelSizes: list of kernel sizes for each layer. Default to config.stemKernelSize
- strides: list of strides for each layer. Default to 1.
- act: activation to apply between layers.
- NON, TANH, SIGMOID, RELU, ELU
-'''
-# batchNorm = {"decay": float, "train": Tensor, "center": bool, "scale": bool}
-# activation after last layer
-def CNNLayer(features, dims, batchNorm = None, dropout = 1.0,
- kernelSizes = None, strides = None, act = "RELU"):
-
- layersNum = len(dims) - 1
-
- if kernelSizes is None:
- kernelSizes = [config.stemKernelSize for i in range(layersNum)]
-
- if strides is None:
- strides = [1 for i in range(layersNum)]
-
- for i in range(layersNum):
- features = cnn(features, dims[i], dims[i+1], name = "cnn_%d" % i, batchNorm = batchNorm,
- dropout = dropout, kernelSize = kernelSizes[i], stride = strides[i], act = act)
-
- return features
-
-######################################## location ########################################
-
-'''
-Computes linear positional encoding for h x w grid.
-If outDim positive, casts positions to that dimension.
-'''
-# ignores dim
-# h,w can be tensor scalars
-def locationL(h, w, dim, outDim = -1, addBias = True):
- dim = 2
- grid = tf.stack(tf.meshgrid(tf.linspace(-config.locationBias, config.locationBias, w),
- tf.linspace(-config.locationBias, config.locationBias, h)), axis = -1)
-
- if outDim > 0:
- grid = linear(grid, dim, outDim, addBias = addBias, name = "locationL")
- dim = outDim
-
- return grid, dim
-
-'''
-Computes sin/cos positional encoding for h x w x (4*dim).
-If outDim positive, casts positions to that dimension.
-Based on positional encoding presented in "Attention is all you need"
-'''
-# dim % 4 = 0
-# h,w can be tensor scalars
-def locationPE(h, w, dim, outDim = -1, addBias = True):
- x = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, w)), axis = -1)
- y = tf.expand_dims(tf.to_float(tf.linspace(-config.locationBias, config.locationBias, h)), axis = -1)
- i = tf.expand_dims(tf.to_float(tf.range(dim)), axis = 0)
-
- peSinX = tf.sin(x / (tf.pow(10000.0, i / dim)))
- peCosX = tf.cos(x / (tf.pow(10000.0, i / dim)))
- peSinY = tf.sin(y / (tf.pow(10000.0, i / dim)))
- peCosY = tf.cos(y / (tf.pow(10000.0, i / dim)))
-
- peSinX = tf.tile(tf.expand_dims(peSinX, axis = 0), [h, 1, 1])
- peCosX = tf.tile(tf.expand_dims(peCosX, axis = 0), [h, 1, 1])
- peSinY = tf.tile(tf.expand_dims(peSinY, axis = 1), [1, w, 1])
- peCosY = tf.tile(tf.expand_dims(peCosY, axis = 1), [1, w, 1])
-
- grid = tf.concat([peSinX, peCosX, peSinY, peCosY], axis = -1)
- dim *= 4
-
- if outDim > 0:
- grid = linear(grid, dim, outDim, addBias = addBias, name = "locationPE")
- dim = outDim
-
- return grid, dim
-
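locationPE evaluates the sin/cos positional-encoding formula from "Attention is all you need" over normalized x and y coordinates and concatenates the four resulting grids. A hedged NumPy sketch of the formula for a single axis, with assumed values standing in for w, dim and config.locationBias:

# NumPy sketch of the sin/cos positional-encoding formula used in locationPE (one axis only).
import numpy as np

w, dim, location_bias = 7, 16, 2.0                          # assumed values
x = np.linspace(-location_bias, location_bias, w)[:, None]  # [w, 1] normalized positions
i = np.arange(dim)[None, :]                                 # [1, dim] frequency index

pe_sin = np.sin(x / np.power(10000.0, i / dim))             # [w, dim]
pe_cos = np.cos(x / np.power(10000.0, i / dim))             # [w, dim]
pe = np.concatenate([pe_sin, pe_cos], axis=-1)              # [w, 2*dim]; locationPE tiles this over h and repeats for y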
-locations = {
- "L": locationL,
- "PE": locationPE
-}
-
-'''
-Adds positional encoding to features. May ease spatial reasoning.
-(although not used in the default model).
-
-Args:
- features: features to add position encoding to.
- [batchSize, h, w, c]
-
- inDim: number of features' channels
- lDim: dimension for positional encodings
- outDim: if positive, cast enhanced features (with positions) to that dimension
- h: features' height
- w: features' width
- locType: L for linear encoding, PE for cos/sin based positional encoding
- mod: way to add positional encoding: concatenation (CNCT), addition (ADD),
- multiplication (MUL), linear transformation (LIN).
-'''
-mods = ["CNCT", "ADD", "LIN", "MUL"]
-# if outDim = -1, then will be set based on inDim, lDim
-def addLocation(features, inDim, lDim, outDim = -1, h = None, w = None,
- locType = "L", mod = "CNCT", name = "", reuse = None): # h,w not needed
-
- with tf.variable_scope("addLocation" + name, reuse = reuse):
- batchSize = tf.shape(features)[0]
- if h is None:
- h = tf.shape(features)[1]
- if w is None:
- w = tf.shape(features)[2]
- dim = inDim
-
- if mod == "LIN":
- if outDim < 0:
- outDim = dim
-
- grid, _ = locations[locType](h, w, lDim, outDim = outDim, addBias = False)
- features = linear(features, dim, outDim, name = "LIN")
- features += grid
- return features, outDim
-
- if mod == "CNCT":
- grid, lDim = locations[locType](h, w, lDim)
- # grid = tf.zeros_like(features) + grid
- grid = tf.tile(tf.expand_dims(grid, axis = 0), [batchSize, 1, 1, 1])
- features = tf.concat([features, grid], axis = -1)
- dim += lDim
-
- elif mod == "ADD":
- grid, _ = locations[locType](h, w, lDim, outDim = dim)
- features += grid
-
- elif mod == "MUL": # MUL
- grid, _ = locations[locType](h, w, lDim, outDim = dim)
-
- if outDim < 0:
- outDim = dim
-
- grid = tf.tile(tf.expand_dims(grid, axis = 0), [batchSize, 1, 1, 1])
- features = tf.concat([features, grid, features * grid], axis = -1)
- dim *= 3
-
- if outDim > 0:
- features = linear(features, dim, outDim)
- dim = outDim
-
- return features, dim
-
-# config.locationAwareEnd
-# H, W, _ = config.imageDims
-# projDim = config.stemProjDim
-# k = config.stemProjPooling
-# projDim on inDim or on out
-# inDim = tf.shape(features)[3]
-
-'''
-Linearize 2d image to linear vector.
-
-Args:
- features: batch of 2d images.
- [batchSize, h, w, inDim]
-
- h: image height
-
- w: image width
-
- inDim: number of channels
-
- projDim: if not None, project image to that dimension before linearization
-
- outDim: if not None, project image to that dimension after linearization
-
- loc: if not None, add positional encoding:
- locType: L for linear encoding, PE for cos/sin based positional encoding
- mod: way to add positional encoding: concatenation (CNCT), addition (ADD),
- multiplication (MUL), linear transformation (LIN).
- pooling: number to pool image with before linearization.
-
-Returns linearized image:
-[batchSize, outDim] (or [batchSize, (h / pooling) * (w / pooling) * projDim] if outDim is not provided)
-'''
-# loc = {"locType": str, "mod": str}
-def linearizeFeatures(features, h, w, inDim, projDim = None, outDim = None,
- loc = None, pooling = None):
-
- if pooling is None:
- pooling = config.imageLinPool
-
- dim = inDim
-
- if loc is not None:
- features, dim = addLocation(features, inDim, lDim = inDim, outDim = inDim,
- locType = loc["locType"], mod = loc["mod"])
-
- if projDim is not None:
- features = linear(features, dim, projDim)
- features = relu(features)
- dim = projDim
-
- if pooling > 1:
- poolingDims = [1, pooling, pooling, 1]
- features = tf.nn.max_pool(features, ksize = poolingDims, strides = poolingDims,
- padding = "SAME")
- h /= pooling
- w /= pooling
-
- dim = h * w * dim
- features = tf.reshape(features, (-1, dim))
-
- if outDim is not None:
- features = linear(features, dim, outDim)
- dim = outDim
-
- return features, dim
-
-################################### multiplication ###################################
-# specific dim / proj for x / y
-'''
-"Enhanced" hadamard product between x and y:
-1. Supports optional projection of x, and y prior to multiplication.
-2. Computes simple multiplication, or a parametrized one, using diagonal of complete matrix (bi-linear)
-3. Optionally concatenate x or y or their projection to the multiplication result.
-
-Support broadcasting
-
-Args:
- x: left-hand side argument
- [batchSize, dim]
-
- y: right-hand side argument
- [batchSize, dim]
-
- dim: input dimension of x and y
-
- dropout: dropout value to apply on x and y
-
- proj: if not None, project x and y:
- dim: projection dimension
- shared: use same projection for x and y
- dropout: dropout to apply to x and y if projected
-
- interMod: multiplication type:
- "MUL": x * y
- "DIAG": x * W * y for a learned diagonal parameter W
- "BL": x' W y for a learned matrix W
-
- concat: if not None, concatenate x or y or their projection.
-
-    mulBias: optional bias to stabilize the multiplication: (x + bias) * (y + bias)
-
-Returns the multiplication result
-[batchSize, outDim], where outDim depends on the proj and concat arguments.
-'''
-# proj = {"dim": int, "shared": bool, "dropout": float} # "act": str, "actDropout": float
-## interMod = ["direct", "scalarW", "bilinear"] # "additive"
-# interMod = ["MUL", "DIAG", "BL", "ADD"]
-# concat = {"x": bool, "y": bool, "proj": bool}
-def mul(x, y, dim, dropout = 1.0, proj = None, interMod = "MUL", concat = None, mulBias = None,
- extendY = True, name = "", reuse = None):
-
- with tf.variable_scope("mul" + name, reuse = reuse):
- origVals = {"x": x, "y": y, "dim": dim}
-
- x = tf.nn.dropout(x, dropout)
- y = tf.nn.dropout(y, dropout)
- # projection
- if proj is not None:
- x = tf.nn.dropout(x, proj.get("dropout", 1.0))
- y = tf.nn.dropout(y, proj.get("dropout", 1.0))
-
- if proj["shared"]:
- xName, xReuse = "proj", None
- yName, yReuse = "proj", True
- else:
- xName, xReuse = "projX", None
- yName, yReuse = "projY", None
-
- x = linear(x, dim, proj["dim"], name = xName, reuse = xReuse)
- y = linear(y, dim, proj["dim"], name = yName, reuse = yReuse)
- dim = proj["dim"]
- projVals = {"x": x, "y": y, "dim": dim}
- proj["x"], proj["y"] = x, y
-
- if extendY:
- y = tf.expand_dims(y, axis = -2)
- # broadcasting to have the same shape
- y = tf.zeros_like(x) + y
-
- # multiplication
- if interMod == "MUL":
- if mulBias is None:
- mulBias = config.mulBias
- output = (x + mulBias) * (y + mulBias)
- elif interMod == "DIAG":
- W = getWeight((dim, )) # change initialization?
- b = getBias((dim, ))
-            output = x * W * y + b
- elif interMod == "BL":
- W = getWeight((dim, dim))
- b = getBias((dim, ))
- output = multiply(x, W) * y + b
- else: # "ADD"
- output = tf.tanh(x + y)
- # concatenation
- if concat is not None:
- concatVals = projVals if concat.get("proj", False) else origVals
- if concat.get("x", False):
- output = tf.concat([output, concatVals["x"]], axis = -1)
- dim += concatVals["dim"]
-
- if concat.get("y", False):
- output = ops.concat(output, concatVals["y"], extendY = extendY)
- dim += concatVals["dim"]
-
- return output, dim
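-
-# Usage sketch (illustrative, not from the original file): fuse two [batchSize, dim]
-# vectors with a shared 256-d projection and append the original x to the product.
-#   out, outDim = mul(x, y, dim, proj = {"dim": 256, "shared": True},
-#       concat = {"x": True}, extendY = False, name = "fuse")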
-
-######################################## rnns ########################################
-
-'''
-Creates an RNN cell.
-
-Args:
- hdim: the hidden dimension of the RNN cell.
-
- reuse: whether the cell should reuse parameters or create new ones.
-
- cellType: the cell type
- RNN, GRU, LSTM, MiGRU, MiLSTM, ProjLSTM
-
- act: the cell activation
- NON, TANH, SIGMOID, RELU, ELU
-
- projDim: if ProjLSTM, the dimension for the states projection
-
-Returns the cell.
-'''
-# tf.nn.rnn_cell.MultiRNNCell([cell(hDim, reuse = reuse) for _ in config.encNumLayers])
-# note that config.enc params not general
-def createCell(hDim, reuse, cellType = None, act = None, projDim = None):
- if cellType is None:
- cellType = config.encType
-
- activation = activations.get(act, None)
-
- if cellType == "ProjLSTM":
- cell = tf.nn.rnn_cell.LSTMCell
- if projDim is None:
- projDim = config.cellDim
- cell = cell(hDim, num_proj = projDim, reuse = reuse, activation = activation)
- return cell
-
- cells = {
- "RNN": tf.nn.rnn_cell.BasicRNNCell,
- "GRU": tf.nn.rnn_cell.GRUCell,
- "LSTM": tf.nn.rnn_cell.BasicLSTMCell,
- "MiGRU": MiGRUCell,
- "MiLSTM": MiLSTMCell
- }
-
- cell = cells[cellType](hDim, reuse = reuse, activation = activation)
-
- return cell
-
-'''
-Runs a forward RNN layer.
-
-Args:
- inSeq: the input sequence to run the RNN over.
- [batchSize, sequenceLength, inDim]
-
-    seqL: the length of each sequence in the batch.
- [batchSize, 1]
-
- hDim: hidden dimension of the RNN.
-
- cellType: the cell type
- RNN, GRU, LSTM, MiGRU, MiLSTM, ProjLSTM
-
- dropout: value for dropout over input sequence
-
- varDp: if not None, state and input variational dropouts to apply.
-    The input dimension must be provided (inputSize).
-
-Returns the outputs sequence and final RNN state.
-'''
-# varDp = {"stateDp": float, "inputDp": float, "inputSize": int}
-# proj = {"output": bool, "state": bool, "dim": int, "dropout": float, "act": str}
-def fwRNNLayer(inSeq, seqL, hDim, cellType = None, dropout = 1.0, varDp = None,
- name = "", reuse = None): # proj = None
-
- with tf.variable_scope("rnnLayer" + name, reuse = reuse):
- batchSize = tf.shape(inSeq)[0]
-
- cell = createCell(hDim, reuse, cellType) # passing reuse isn't mandatory
-
- if varDp is not None:
- cell = tf.contrib.rnn.DropoutWrapper(cell,
- state_keep_prob = varDp["stateDp"],
- input_keep_prob = varDp["inputDp"],
- variational_recurrent = True, input_size = varDp["inputSize"], dtype = tf.float32)
- else:
- inSeq = tf.nn.dropout(inSeq, dropout)
-
- initialState = cell.zero_state(batchSize, tf.float32)
-
- outSeq, lastState = tf.nn.dynamic_rnn(cell, inSeq,
- sequence_length = seqL,
- initial_state = initialState,
- swap_memory = True)
-
- if isinstance(lastState, tf.nn.rnn_cell.LSTMStateTuple):
- lastState = lastState.h
-
- # if proj is not None:
- # if proj["output"]:
- # outSeq = linear(outSeq, cell.output_size, proj["dim"], act = proj["act"],
- # dropout = proj["dropout"], name = "projOutput")
-
- # if proj["state"]:
- # lastState = linear(lastState, cell.state_size, proj["dim"], act = proj["act"],
- # dropout = proj["dropout"], name = "projState")
-
- return outSeq, lastState
-
-'''
-Runs a bidirectional RNN layer.
-
-Args:
- inSeq: the input sequence to run the RNN over.
- [batchSize, sequenceLength, inDim]
-
-    seqL: the length of each sequence in the batch.
- [batchSize, 1]
-
- hDim: hidden dimension of the RNN.
-
- cellType: the cell type
- RNN, GRU, LSTM, MiGRU, MiLSTM
-
- dropout: value for dropout over input sequence
-
- varDp: if not None, state and input variational dropouts to apply.
-    The input dimension must be provided (inputSize).
-
-Returns the outputs sequence and final RNN state.
-'''
-# varDp = {"stateDp": float, "inputDp": float, "inputSize": int}
-# proj = {"output": bool, "state": bool, "dim": int, "dropout": float, "act": str}
-def biRNNLayer(inSeq, seqL, hDim, cellType = None, dropout = 1.0, varDp = None,
- name = "", reuse = None): # proj = None,
-
- with tf.variable_scope("birnnLayer" + name, reuse = reuse):
- batchSize = tf.shape(inSeq)[0]
-
- with tf.variable_scope("fw"):
- cellFw = createCell(hDim, reuse, cellType)
- with tf.variable_scope("bw"):
- cellBw = createCell(hDim, reuse, cellType)
-
- if varDp is not None:
- cellFw = tf.contrib.rnn.DropoutWrapper(cellFw,
- state_keep_prob = varDp["stateDp"],
- input_keep_prob = varDp["inputDp"],
- variational_recurrent = True, input_size = varDp["inputSize"], dtype = tf.float32)
-
- cellBw = tf.contrib.rnn.DropoutWrapper(cellBw,
- state_keep_prob = varDp["stateDp"],
- input_keep_prob = varDp["inputDp"],
- variational_recurrent = True, input_size = varDp["inputSize"], dtype = tf.float32)
- else:
- inSeq = tf.nn.dropout(inSeq, dropout)
-
- initialStateFw = cellFw.zero_state(batchSize, tf.float32)
- initialStateBw = cellBw.zero_state(batchSize, tf.float32)
-
- (outSeqFw, outSeqBw), (lastStateFw, lastStateBw) = tf.nn.bidirectional_dynamic_rnn(
- cellFw, cellBw, inSeq,
- sequence_length = seqL,
- initial_state_fw = initialStateFw,
- initial_state_bw = initialStateBw,
- swap_memory = True)
-
- if isinstance(lastStateFw, tf.nn.rnn_cell.LSTMStateTuple):
- lastStateFw = lastStateFw.h # take c?
- lastStateBw = lastStateBw.h
-
- outSeq = tf.concat([outSeqFw, outSeqBw], axis = -1)
- lastState = tf.concat([lastStateFw, lastStateBw], axis = -1)
-
- # if proj is not None:
- # if proj["output"]:
- # outSeq = linear(outSeq, cellFw.output_size + cellFw.output_size,
- # proj["dim"], act = proj["act"], dropout = proj["dropout"],
- # name = "projOutput")
-
- # if proj["state"]:
- # lastState = linear(lastState, cellFw.state_size + cellFw.state_size,
- # proj["dim"], act = proj["act"], dropout = proj["dropout"],
- # name = "projState")
-
- return outSeq, lastState
-
-# int(hDim / 2) for biRNN?
-'''
-Runs an RNN layer by calling biRNN or fwRNN.
-
-Args:
- inSeq: the input sequence to run the RNN over.
- [batchSize, sequenceLength, inDim]
-
-    seqL: the length of each sequence in the batch.
- [batchSize, 1]
-
- hDim: hidden dimension of the RNN.
-
- bi: true to run bidirectional rnn.
-
- cellType: the cell type
- RNN, GRU, LSTM, MiGRU, MiLSTM
-
- dropout: value for dropout over input sequence
-
- varDp: if not None, state and input variational dropouts to apply.
-    The input dimension must be provided (inputSize).
-
-Returns the outputs sequence and final RNN state.
-'''
-# proj = {"output": bool, "state": bool, "dim": int, "dropout": float, "act": str}
-# varDp = {"stateDp": float, "inputDp": float, "inputSize": int}
-def RNNLayer(inSeq, seqL, hDim, bi = None, cellType = None, dropout = 1.0, varDp = None,
- name = "", reuse = None): # proj = None
-
- with tf.variable_scope("rnnLayer" + name, reuse = reuse):
- if bi is None:
- bi = config.encBi
-
- rnn = biRNNLayer if bi else fwRNNLayer
-
- if bi:
- hDim = int(hDim / 2)
-
- return rnn(inSeq, seqL, hDim, cellType = cellType, dropout = dropout, varDp = varDp) # , proj = proj
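-
-# Usage sketch (illustrative, not from the original file): `embeddings` and
-# `seqLengths` are placeholder tensors for a padded batch and its lengths.
-#   outSeq, lastState = RNNLayer(embeddings, seqLengths, 512, bi = True, cellType = "LSTM")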
-
-# tf counterpart?
-# hDim = config.moduleDim
-def multigridRNNLayer(features, h, w, dim, name = "", reuse = None):
-    with tf.variable_scope("multigridRNNLayer" + name, reuse = reuse):
-        features = linear(features, dim, dim // 2, name = "i")
-
-        output0 = gridRNNLayer(features, h, w, dim, right = True, down = True, name = "rd")
-        output1 = gridRNNLayer(features, h, w, dim, right = True, down = False, name = "r")
-        output2 = gridRNNLayer(features, h, w, dim, right = False, down = True, name = "d")
-        output3 = gridRNNLayer(features, h, w, dim, right = False, down = False, name = "NON")
-
-        # four directional outputs of size dim each are concatenated here
-        output = tf.concat([output0, output1, output2, output3], axis = -1)
-        output = linear(output, 4 * dim, dim, name = "o")
-
-        return output
-
-# h,w should be constants
-def gridRNNLayer(features, h, w, dim, right, down, name = "", reuse = None):
- with tf.variable_scope("gridRNNLayer" + name):
- batchSize = tf.shape(features)[0]
-
- cell = createCell(dim, reuse = reuse, cellType = config.stemGridRnnMod,
- act = config.stemGridAct)
-
- initialState = cell.zero_state(batchSize, tf.float32)
-
- inputs = [tf.unstack(row, w, axis = 1) for row in tf.unstack(features, h, axis = 1)]
- states = [[None for _ in range(w)] for _ in range(h)]
-
- iAxis = range(h) if down else (range(h)[::-1])
- jAxis = range(w) if right else (range(w)[::-1])
-
- iPrev = -1 if down else 1
- jPrev = -1 if right else 1
-
- prevState = lambda i,j: states[i][j] if (i >= 0 and i < h and j >= 0 and j < w) else initialState
-
- for i in iAxis:
- for j in jAxis:
- prevs = tf.concat((prevState(i + iPrev, j), prevState(i, j + jPrev)), axis = -1)
- curr = inputs[i][j]
- _, states[i][j] = cell(prevs, curr)
-
- outputs = [tf.stack(row, axis = 1) for row in states]
- outputs = tf.stack(outputs, axis = 1)
-
- return outputs
-
-# tf seq2seq?
-# def projRNNLayer(inSeq, seqL, hDim, labels, labelsNum, labelsDim, labelsEmb, name = "", reuse = None):
-# with tf.variable_scope("projRNNLayer" + name):
-# batchSize = tf.shape(features)[0]
-
-# cell = createCell(hDim, reuse = reuse)
-
-# projCell = ProjWrapper(cell, labelsNum, labelsDim, labelsEmb, # config.wrdEmbDim
-# feedPrev = True, dropout = 1.0, config,
-# temperature = 1.0, clevr_sample = False, reuse)
-
-# initialState = projCell.zero_state(batchSize, tf.float32)
-
-# if config.soft:
-# inSeq = inSeq
-
-# # outputs, _ = tf.nn.static_rnn(projCell, inputs,
-# # sequence_length = seqL,
-# # initial_state = initialState)
-
-# inSeq = tf.unstack(inSeq, axis = 1)
-# state = initialState
-# logitsList = []
-# chosenList = []
-
-# for inp in inSeq:
-# (logits, chosen), state = projCell(inp, state)
-# logitsList.append(logits)
-# chosenList.append(chosen)
-# projCell.reuse = True
-
-# logitsOut = tf.stack(logitsList, axis = 1)
-# chosenOut = tf.stack(chosenList, axis = 1)
-# outputs = (logitsOut, chosenOut)
-# else:
-# labels = tf.to_float(labels)
-# labels = tf.concat([tf.zeros((batchSize, 1)), labels], axis = 1)[:, :-1] # ,newaxis
-# inSeq = tf.concat([inSeq, tf.expand_dims(labels, axis = -1)], axis = -1)
-
-# outputs, _ = tf.nn.dynamic_rnn(projCell, inSeq,
-# sequence_length = seqL,
-# initial_state = initialState,
-# swap_memory = True)
-
-# return outputs #, labelsEmb
-
-############################### variational dropout ###############################
-
-'''
-Generates a variational dropout mask for a given shape and a dropout
-probability value.
-'''
-def generateVarDpMask(shape, keepProb):
- randomTensor = tf.to_float(keepProb)
- randomTensor += tf.random_uniform(shape, minval = 0, maxval = 1)
- binaryTensor = tf.floor(randomTensor)
- mask = tf.to_float(binaryTensor)
- return mask
-
-'''
-Applies a variational dropout mask to an input, given the dropout mask
-and a keep probability value.
-'''
-def applyVarDpMask(inp, mask, keepProb):
- ret = (tf.div(inp, tf.to_float(keepProb))) * mask
- return ret
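-
-# Usage sketch (illustrative, not from the original file): sample one mask per batch
-# and reuse it at every timestep so the same units are dropped throughout the sequence.
-#   mask = generateVarDpMask((batchSize, dim), keepProb)
-#   h = applyVarDpMask(h, mask, keepProb)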
diff --git a/spaces/ChandraMohanNayal/AutoGPT/autogpt/config/ai_config.py b/spaces/ChandraMohanNayal/AutoGPT/autogpt/config/ai_config.py
deleted file mode 100644
index d50c30beee9dc8009f63415378ae1c6a399f0037..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/autogpt/config/ai_config.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# sourcery skip: do-not-use-staticmethod
-"""
-A module that contains the AIConfig class, which holds the AI's configuration.
-"""
-from __future__ import annotations
-
-import os
-from typing import Type
-
-import yaml
-
-
-class AIConfig:
- """
- A class object that contains the configuration information for the AI
-
- Attributes:
- ai_name (str): The name of the AI.
- ai_role (str): The description of the AI's role.
- ai_goals (list): The list of objectives the AI is supposed to complete.
- """
-
- def __init__(
- self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
- ) -> None:
- """
- Initialize a class instance
-
- Parameters:
- ai_name (str): The name of the AI.
- ai_role (str): The description of the AI's role.
- ai_goals (list): The list of objectives the AI is supposed to complete.
- Returns:
- None
- """
- if ai_goals is None:
- ai_goals = []
- self.ai_name = ai_name
- self.ai_role = ai_role
- self.ai_goals = ai_goals
-
- # Soon this will go in a folder where it remembers more stuff about the run(s)
- SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
-
- @staticmethod
- def load(config_file: str = SAVE_FILE) -> "AIConfig":
- """
-        Returns an AIConfig instance with parameters (ai_name, ai_role, ai_goals)
-        loaded from the yaml file if it exists, else an instance with empty parameters.
-
- Parameters:
-            config_file (str): The path to the config yaml file.
- DEFAULT: "../ai_settings.yaml"
-
- Returns:
-            AIConfig: An instance populated with the loaded (or default) parameters.
- """
-
- try:
- with open(config_file, encoding="utf-8") as file:
- config_params = yaml.load(file, Loader=yaml.FullLoader)
- except FileNotFoundError:
- config_params = {}
-
- ai_name = config_params.get("ai_name", "")
- ai_role = config_params.get("ai_role", "")
- ai_goals = config_params.get("ai_goals", [])
- # type: Type[AIConfig]
- return AIConfig(ai_name, ai_role, ai_goals)
-
- def save(self, config_file: str = SAVE_FILE) -> None:
- """
-        Saves the class parameters to the specified yaml file path.
-
- Parameters:
- config_file(str): The path to the config yaml file.
- DEFAULT: "../ai_settings.yaml"
-
- Returns:
- None
- """
-
- config = {
- "ai_name": self.ai_name,
- "ai_role": self.ai_role,
- "ai_goals": self.ai_goals,
- }
- with open(config_file, "w", encoding="utf-8") as file:
- yaml.dump(config, file, allow_unicode=True)
-
- def construct_full_prompt(self) -> str:
- """
- Returns a prompt to the user with the class information in an organized fashion.
-
- Parameters:
- None
-
- Returns:
- full_prompt (str): A string containing the initial prompt for the user
- including the ai_name, ai_role and ai_goals.
- """
-
- prompt_start = (
- "Your decisions must always be made independently without"
- " seeking user assistance. Play to your strengths as an LLM and pursue"
- " simple strategies with no legal complications."
- ""
- )
-
- from autogpt.prompt import get_prompt
-
- # Construct full prompt
- full_prompt = (
- f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
- )
- for i, goal in enumerate(self.ai_goals):
- full_prompt += f"{i+1}. {goal}\n"
-
- full_prompt += f"\n\n{get_prompt()}"
- return full_prompt
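-
-
-# Example round-trip (illustrative only; the values are placeholders):
-#   config = AIConfig("Entrepreneur-GPT", "an AI that runs a business", ["increase net worth"])
-#   config.save()               # writes ai_settings.yaml one level above this module
-#   restored = AIConfig.load()  # restored.ai_name == "Entrepreneur-GPT"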
diff --git a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_commands.py b/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_commands.py
deleted file mode 100644
index ecbac9b73bd9ad872931d77e144dd853b3d8ef64..0000000000000000000000000000000000000000
--- a/spaces/ChandraMohanNayal/AutoGPT/tests/unit/test_commands.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""Unit tests for the commands module"""
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-import autogpt.agent.agent_manager as agent_manager
-from autogpt.app import execute_command, list_agents, start_agent
-
-
-@pytest.mark.integration_test
-def test_make_agent() -> None:
- """Test the make_agent command"""
- with patch("openai.ChatCompletion.create") as mock:
- obj = MagicMock()
- obj.response.choices[0].messages[0].content = "Test message"
- mock.return_value = obj
- start_agent("Test Agent", "chat", "Hello, how are you?", "gpt2")
- agents = list_agents()
- assert "List of agents:\n0: chat" == agents
- start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt2")
- agents = list_agents()
- assert "List of agents:\n0: chat\n1: write" == agents
diff --git a/spaces/Chintan-Donda/KKMS-KSSW-HF/src/weather.py b/spaces/Chintan-Donda/KKMS-KSSW-HF/src/weather.py
deleted file mode 100644
index 266413b4427232a0ac3a1d5a21378f80689f55c2..0000000000000000000000000000000000000000
--- a/spaces/Chintan-Donda/KKMS-KSSW-HF/src/weather.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import requests
-from bs4 import BeautifulSoup as bs
-import src.constants as constants_utils
-
-
-class WEATHER:
- def __init__(self):
- self.base_url = 'https://nwp.imd.gov.in/blf/blf_temp'
- self.headers = {
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
- }
-
- self.state_names_codes = {}
- self.districts = []
-
-
- def get_state_names_codes(
- self
- ):
- response = requests.get(
- self.base_url,
- headers=self.headers,
- )
-
- soup = bs(response.text, 'html.parser')
- for option in soup.find_all('option'):
- if option.text.strip() == 'Select':
- continue
- self.state_names_codes[option.text.strip()] = str(option['value'].split('=')[-1][:2])
-
- return self.state_names_codes
-
-
- def get_district_names(
- self,
- state_name
- ):
- url = f"{self.base_url}/dis.php?value={constants_utils.WEATHER_FORECAST_STATE_CODES.get(state_name, '') + state_name}"
- response = requests.get(
- url,
- headers=self.headers,
- )
-
- soup = bs(response.text, 'html.parser')
- self.districts = soup.findAll('select', {'name': 'dis'}, limit=None)
- self.districts = [district.strip() for district in self.districts[0].text.split('\n') if district and district != 'Select']
- return self.districts
-
-
- # Weather forecast from Govt. website
- def get_weather_forecast(
- self,
- state,
- district,
- is_block_level=False
- ):
- self.district_url = f"{self.base_url}/block.php?dis={constants_utils.WEATHER_FORECAST_STATE_CODES.get(state, '') + district}"
- self.block_url = f'{self.base_url}/table2.php'
-
- response = requests.get(self.district_url if not is_block_level else self.block_url)
- soup = bs(response.text, 'html.parser')
- scripts = soup.findAll('font')[0]
- return scripts.text
-
-
-    # Weather by scraping Google search results
- def get_weather(
- self,
- city
- ):
- city = city + " weather"
- city = city.replace(" ", "+")
-
- headers = {
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
- }
- response = requests.get(
- f'https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid=chrome&ie=UTF-8', headers=headers)
-
- soup = bs(response.text, 'html.parser')
- location = soup.select('#wob_loc')[0].getText().strip()
- time = soup.select('#wob_dts')[0].getText().strip()
- info = soup.select('#wob_dc')[0].getText().strip()
- temperature = soup.select('#wob_tm')[0].getText().strip()
- temperature = temperature + "°C"
-
- return time, info, temperature
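-
-
-# Example usage (illustrative only; the state/city names are placeholders and the
-# scraped endpoints may change over time):
-#   weather = WEATHER()
-#   states = weather.get_state_names_codes()
-#   districts = weather.get_district_names('Gujarat')
-#   time, info, temperature = weather.get_weather('Ahmedabad')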
diff --git a/spaces/Cran-May/BetaSEA-Streamlit/README.md b/spaces/Cran-May/BetaSEA-Streamlit/README.md
deleted file mode 100644
index 75ed1444abb880d9f3cf663983fad0bf2b4278e8..0000000000000000000000000000000000000000
--- a/spaces/Cran-May/BetaSEA-Streamlit/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: 兮辞·析辞-常明
-emoji: 💻
-colorFrom: indigo
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: true
-models:
-- Cran-May/SLIDE
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/display.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/display.py
deleted file mode 100644
index 91c5f33e093b32cf81accd6fdeeb8a18292c28c0..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/display.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from ..utils.display import Displayable, default_renderer_base, json_renderer_base
-from ..utils.display import RendererRegistry, HTMLRenderer
-
-
-__all__ = (
- "Displayable",
- "default_renderer_base",
- "json_renderer_base",
- "RendererRegistry",
- "HTMLRenderer",
-)
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/J_S_T_F_.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/J_S_T_F_.py
deleted file mode 100644
index 111c700710e56f1f92703b212b530267313293ba..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/J_S_T_F_.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from .otBase import BaseTTXConverter
-
-
-class table_J_S_T_F_(BaseTTXConverter):
- pass
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/label.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/label.py
deleted file mode 100644
index 5a2c40fd387b7250cd75d3dfd7ade49ab5343b51..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/label.py
+++ /dev/null
@@ -1,182 +0,0 @@
-"""gr.Label() component."""
-
-from __future__ import annotations
-
-import operator
-from pathlib import Path
-from typing import Callable, Literal
-
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import (
- JSONSerializable,
-)
-
-from gradio.components.base import IOComponent, _Keywords
-from gradio.deprecation import warn_style_method_deprecation
-from gradio.events import (
- Changeable,
- EventListenerMethod,
- Selectable,
-)
-
-set_documentation_group("component")
-
-
-@document()
-class Label(Changeable, Selectable, IOComponent, JSONSerializable):
- """
- Displays a classification label, along with confidence scores of top categories, if provided.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs, or a {str} path to a .json file containing a json dictionary in the structure produced by Label.postprocess().
-
- Demos: main_note, titanic_survival
- Guides: image-classification-in-pytorch, image-classification-in-tensorflow, image-classification-with-vision-transformers, building-a-pictionary-app
- """
-
- CONFIDENCES_KEY = "confidences"
-
- def __init__(
- self,
- value: dict[str, float] | str | float | Callable | None = None,
- *,
- num_top_classes: int | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- color: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value to show in the component. If a str or number is provided, simply displays the string or number. If a {Dict[str, float]} of classes and confidences is provided, displays the top class on top and the `num_top_classes` below, along with their confidence bars. If callable, the function will be called whenever the app loads to set the initial value of the component.
- num_top_classes: number of most confident classes to show.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- color: The background color of the label (either a valid css color name or hexadecimal string).
- """
- self.num_top_classes = num_top_classes
- self.color = color
- self.select: EventListenerMethod
- """
- Event listener for when the user selects a category from Label.
- Uses event data gradio.SelectData to carry `value` referring to name of selected category, and `index` to refer to index.
- See EventData documentation on how to use this event data.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "num_top_classes": self.num_top_classes,
- "value": self.value,
- "color": self.color,
- "selectable": self.selectable,
- **IOComponent.get_config(self),
- }
-
- def postprocess(self, y: dict[str, float] | str | float | None) -> dict | None:
- """
- Parameters:
- y: a dictionary mapping labels to confidence value, or just a string/numerical label by itself
- Returns:
- Object with key 'label' representing primary label, and key 'confidences' representing a list of label-confidence pairs
- """
- if y is None or y == {}:
- return {}
- if isinstance(y, str) and y.endswith(".json") and Path(y).exists():
- return self.serialize(y)
- if isinstance(y, (str, float, int)):
- return {"label": str(y)}
- if isinstance(y, dict):
- if "confidences" in y and isinstance(y["confidences"], dict):
- y = y["confidences"]
- y = {c["label"]: c["confidence"] for c in y}
- sorted_pred = sorted(y.items(), key=operator.itemgetter(1), reverse=True)
- if self.num_top_classes is not None:
- sorted_pred = sorted_pred[: self.num_top_classes]
- return {
- "label": sorted_pred[0][0],
- "confidences": [
- {"label": pred[0], "confidence": pred[1]} for pred in sorted_pred
- ],
- }
- raise ValueError(
- "The `Label` output interface expects one of: a string label, or an int label, a "
- "float label, or a dictionary whose keys are labels and values are confidences. "
- f"Instead, got a {type(y)}"
- )
-
- @staticmethod
- def update(
- value: dict[str, float]
- | str
- | float
- | Literal[_Keywords.NO_VALUE]
- | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- container: bool | None = None,
- scale: int | None = None,
- min_width: int | None = None,
- visible: bool | None = None,
- color: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- ):
- # If color is not specified (NO_VALUE) map it to None so that
- # it gets filtered out in postprocess. This will mean the color
- # will not be updated in the front-end
- if color is _Keywords.NO_VALUE:
- color = None
-        # If the color was specified by the developer as None,
-        # map it to "transparent" so that the front-end resets to the
-        # default no-background state.
- elif color is None:
- color = "transparent"
- return {
- "label": label,
- "show_label": show_label,
- "container": container,
- "scale": scale,
- "min_width": min_width,
- "visible": visible,
- "value": value,
- "color": color,
- "__type__": "update",
- }
-
- def style(
- self,
- *,
- container: bool | None = None,
- ):
- """
- This method is deprecated. Please set these arguments in the constructor instead.
- """
- warn_style_method_deprecation()
- if container is not None:
- self.container = container
- return self
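-
-
-# Typical usage (illustrative, not part of this file): `classify` is a hypothetical
-# function returning a dict such as {"cat": 0.7, "dog": 0.2, "bird": 0.1}.
-#   demo = gr.Interface(fn=classify, inputs=gr.Image(), outputs=gr.Label(num_top_classes=3))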
diff --git a/spaces/DaFujaTyping/hf-Chat-ui/src/routes/conversation/[id]/summarize/+server.ts b/spaces/DaFujaTyping/hf-Chat-ui/src/routes/conversation/[id]/summarize/+server.ts
deleted file mode 100644
index fcb4800aedbd249f49acbbecdfa10ae65405e7bd..0000000000000000000000000000000000000000
--- a/spaces/DaFujaTyping/hf-Chat-ui/src/routes/conversation/[id]/summarize/+server.ts
+++ /dev/null
@@ -1,77 +0,0 @@
-import { buildPrompt } from "$lib/buildPrompt";
-import { PUBLIC_SEP_TOKEN } from "$lib/constants/publicSepToken.js";
-import { collections } from "$lib/server/database.js";
-import { modelEndpoint } from "$lib/server/modelEndpoint.js";
-import { defaultModel } from "$lib/server/models.js";
-import { trimPrefix } from "$lib/utils/trimPrefix.js";
-import { trimSuffix } from "$lib/utils/trimSuffix.js";
-import { textGeneration } from "@huggingface/inference";
-import { error } from "@sveltejs/kit";
-import { ObjectId } from "mongodb";
-
-export async function POST({ params, locals, fetch }) {
- const convId = new ObjectId(params.id);
-
- const conversation = await collections.conversations.findOne({
- _id: convId,
- sessionId: locals.sessionId,
- });
-
- if (!conversation) {
- throw error(404, "Conversation not found");
- }
-
- const firstMessage = conversation.messages.find((m) => m.from === "user");
-
- const userPrompt =
- `Please summarize the following message as a single sentence of less than 5 words:\n` +
- firstMessage?.content;
-
- const prompt = buildPrompt([{ from: "user", content: userPrompt }], defaultModel);
-
- const parameters = {
- ...defaultModel.parameters,
- return_full_text: false,
- };
-
- const endpoint = modelEndpoint(defaultModel);
- let { generated_text } = await textGeneration(
- {
- model: endpoint.url,
- inputs: prompt,
- parameters,
- },
- {
- fetch: (url, options) =>
- fetch(url, {
- ...options,
- headers: { ...options?.headers, Authorization: endpoint.authorization },
- }),
- }
- );
-
- generated_text = trimSuffix(trimPrefix(generated_text, "<|startoftext|>"), PUBLIC_SEP_TOKEN);
-
- if (generated_text) {
- await collections.conversations.updateOne(
- {
- _id: convId,
- sessionId: locals.sessionId,
- },
- {
- $set: { title: generated_text },
- }
- );
- }
-
- return new Response(
- JSON.stringify(
- generated_text
- ? {
- title: generated_text,
- }
- : {}
- ),
- { headers: { "Content-Type": "application/json" } }
- );
-}
diff --git a/spaces/Dagfinn1962/prodia2/app.py b/spaces/Dagfinn1962/prodia2/app.py
deleted file mode 100644
index cd6da72e6c2522d67142ec6d970ca1d58a7db678..0000000000000000000000000000000000000000
--- a/spaces/Dagfinn1962/prodia2/app.py
+++ /dev/null
@@ -1,336 +0,0 @@
-import numpy as np
-import gradio as gr
-import ast
-import requests
-
-import logging
-from rembg import new_session
-from cutter import remove, make_label
-from utils import *
-
-API_URL_INITIAL = "https://ysharma-playground-ai-exploration.hf.space/run/initial_dataframe"
-API_URL_NEXT10 = "https://ysharma-playground-ai-exploration.hf.space/run/next_10_rows"
-
-#from theme_dropdown import create_theme_dropdown # noqa: F401
-
-from theme_dropdown import create_theme_dropdown # noqa: F401
-
-dropdown, js = create_theme_dropdown()
-
-models = [
- {"name": "❤ STABLE DIFFUSION MODELS ==========", "url": "stabilityai/stable-diffusion-2-1"},
- {"name": "SD ComVis 1.2","url": "CompVis/stable-diffusion-v1-2"},
- {"name": "SD Comvis 1.4","url": "CompVis/stable-diffusion-v1-4"},
- {"name": "SD runawayml 1.5","url": "runwayml/stable-diffusion-v1-5"},
- {"name": "SD stable-diffusion xl base 1.0","url": "timothymhowe/stable-diffusion-xl-base-1.0"},
- {"name": "SD NSFW","url": "digiplay/CamelliaMix_NSFW_diffusers_v1.1"},
-
- {"name": "SD Dreamshaper-Anime","url": "Lykon/DreamShaper"},
- {"name": "Dreamlike Anime","url": "dreamlike-art/dreamlike-photoreal-2.0"},
- {"name": "❤ REALISTIC PHOTO MODELS ==========", "url": "dreamlike-art/dreamlike-photoreal-2.0"},
- {"name": "AmiIReal", "url": "stablediffusionapi/amireal"},
- {"name": "Analog Diffusion", "url": "wavymulder/Analog-Diffusion"},
- {"name": "Circulus 2.8", "url": "circulus/sd-photoreal-v2.8"},
- {"name": "UltraSkin", "url": "VegaKH/Ultraskin"},
- {"name": "Wavyfusion", "url": "wavymulder/wavyfusion"},
- {"name": "❤ SEMI-REALISTIC MODELS ==========", "url": "stablediffusionapi/all-526"},
- {"name": "All 526", "url": "stablediffusionapi/all-526"},
- {"name": "All 526 animated", "url": "stablediffusionapi/all-526-animated"},
- {"name": "Circulus Semi Real 2", "url": "circulus/sd-photoreal-semi-v2"},
- {"name": "Semi Real Mix", "url": "robotjung/SemiRealMix"},
- {"name": "SpyBG", "url": "stablediffusionapi/spybg"},
- {"name": "Stable Diffusion 2", "url": "stabilityai/stable-diffusion-2-1"},
- {"name": "stability AI", "url": "stabilityai/stable-diffusion-2-1-base"},
- {"name": "Compressed-S-D", "url": "nota-ai/bk-sdm-small"},
- {"name": "Future Diffusion", "url": "nitrosocke/Future-Diffusion"},
- {"name": "JWST Deep Space Diffusion", "url": "dallinmackay/JWST-Deep-Space-diffusion"},
- {"name": "Robo Diffusion 3 Base", "url": "nousr/robo-diffusion-2-base"},
- {"name": "Robo Diffusion", "url": "nousr/robo-diffusion"},
- {"name": "Tron Legacy Diffusion", "url": "dallinmackay/Tron-Legacy-diffusion"},
- {"name": "❤ 3D ART MODELS ==========", "url": "DucHaiten/DucHaitenAIart"},
- {"name": "DucHaiten Art", "url": "DucHaiten/DucHaitenAIart"},
- {"name": "DucHaiten ClassicAnime", "url": "DucHaiten/DH_ClassicAnime"},
- {"name": "DucHaiten DreamWorld", "url": "DucHaiten/DucHaitenDreamWorld"},
- {"name": "DucHaiten Journey", "url": "DucHaiten/DucHaitenJourney"},
- {"name": "DucHaiten StyleLikeMe", "url": "DucHaiten/DucHaiten-StyleLikeMe"},
- {"name": "DucHaiten SuperCute", "url": "DucHaiten/DucHaitenSuperCute"},
- {"name": "Redshift Diffusion 768", "url": "nitrosocke/redshift-diffusion-768"},
- {"name": "Redshift Diffusion", "url": "nitrosocke/redshift-diffusion"},
-]
-
-
-#### REM-BG
-
-remove_bg_models = {
- "TracerUniversalB7": "TracerUniversalB7",
- "U2NET": "u2net",
- "U2NET Human Seg": "u2net_human_seg",
- "U2NET Cloth Seg": "u2net_cloth_seg"
-}
-
-model_choices = keys(remove_bg_models)
-
-
-def predict(image, session, smoot, matting, bg_color):
-
- session = new_session(remove_bg_models[session])
-
- try:
- return remove(session, image, smoot, matting, bg_color)
- except ValueError as err:
- logging.error(err)
- return make_label(str(err)), None
-
-
-def change_show_mask(chk_state):
- return gr.Image.update(visible=chk_state)
-
-
-def change_include_matting(chk_state):
- return gr.Box.update(visible=chk_state), (0, 0, 0), 0, 0, 0
-
-
-def change_foreground_threshold(fg_value, value):
- fg, bg, erode = value
- return fg_value, bg, erode
-
-
-def change_background_threshold(bg_value, value):
- fg, bg, erode = value
- return fg, bg_value, erode
-
-
-def change_erode_size(erode_value, value):
- fg, bg, erode = value
- return fg, bg, erode_value
-
-
-def set_dominant_color(chk_state):
- return chk_state, gr.ColorPicker.update(value=False, visible=not chk_state)
-
-
-def change_picker_color(picker, dominant):
- if not dominant:
- return picker
- return dominant
-
-
-def change_background_mode(chk_state):
- return gr.ColorPicker.update(value=False, visible=chk_state), \
- gr.Checkbox.update(value=False, visible=chk_state)
-
-
-
-###########
-
-text_gen = gr.Interface.load("spaces/daspartho/prompt-extend")
-
-current_model = models[0]
-
-models2 = []
-for model in models:
- model_url = f"models/{model['url']}"
- loaded_model = gr.Interface.load(model_url, live=True, preprocess=True)
- models2.append(loaded_model)
-
-def text_it(inputs, text_gen=text_gen):
- return text_gen(inputs)
-
-def flip_text(x):
- return x[::-1]
-
-def send_it(inputs, model_choice):
- proc = models2[model_choice]
- return proc(inputs)
-
-
-def flip_image(x):
- return np.fliplr(x)
-
-
-def set_model(current_model_index):
- global current_model
- current_model = models[current_model_index]
- return gr.update(value=f"{current_model['name']}")
-
-#define inference function
-#First: Get initial images for the grid display
-def get_initial_images():
- response = requests.post(API_URL_INITIAL, json={
- "data": []
- }).json()
- #data = response["data"][0]['data'][0][0][:-1]
- response_dict = response['data'][0]
- return response_dict #, [resp[0][:-1] for resp in response["data"][0]["data"]]
-
-#Second: Process response dictionary to get imges as hyperlinked image tags
-def process_response(response_dict):
- return [resp[0][:-1] for resp in response_dict["data"]]
-
-response_dict = get_initial_images()
-initial = process_response(response_dict)
-initial_imgs = '\n' + "\n".join(initial[:-1])
-
-#Third: Load more images for the grid
-def get_next10_images(response_dict, row_count):
- row_count = int(row_count)
- #print("(1)",type(response_dict))
- #Convert the string to a dictionary
-    if not isinstance(response_dict, dict):
- response_dict = ast.literal_eval(response_dict)
- response = requests.post(API_URL_NEXT10, json={
- "data": [response_dict, row_count ] #len(initial)-1
- }).json()
- row_count+=10
- response_dict = response['data'][0]
- #print("(2)",type(response))
- #print("(3)",type(response['data'][0]))
- next_set = [resp[0][:-1] for resp in response_dict["data"]]
-    next_set_images = '\n' + "\n".join(next_set[:-1])
- return response_dict, row_count, next_set_images #response['data'][0]
-
-
-with gr.Blocks(css ='main.css') as pan:
- gr.Markdown("MENU")
-
- with gr.Tab("TEXT TO IMAGE"):
-
- ##model = ("stabilityai/stable-diffusion-2-1")
- model_name1 = gr.Dropdown(
- label="Choose Model",
- choices=[m["name"] for m in models],
- type="index",
- value=current_model["name"],
- interactive=True,
- )
- input_text = gr.Textbox(label="Prompt idea",)
-
- ## run = gr.Button("Generate Images")
- with gr.Row():
- see_prompts = gr.Button("Generate Prompts")
- run = gr.Button("Generate Images", variant="primary")
-
- with gr.Row():
- magic1 = gr.Textbox(label="Generated Prompt", lines=2)
- output1 = gr.Image(label="")
-
-
- with gr.Row():
- magic2 = gr.Textbox(label="Generated Prompt", lines=2)
- output2 = gr.Image(label="")
-
-
- run.click(send_it, inputs=[magic1, model_name1], outputs=[output1])
- run.click(send_it, inputs=[magic2, model_name1], outputs=[output2])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic1])
- see_prompts.click(text_it, inputs=[input_text], outputs=[magic2])
-
- model_name1.change(set_model, inputs=model_name1, outputs=[output1, output2,])
-
- with gr.Tab("AI Library"):
- #Using Gradio Demos as API - This is Hot!
-#get_next10_images(response_dict=response_dict, row_count=9)
-#position: fixed; top: 0; left: 0; width: 100%; padding: 20px; box-shadow: 0 5px 10px rgba(0, 0, 0, 0.2);
-
-#Defining the Blocks layout
- # with gr.Blocks(css = """#img_search img {width: 100%; height: 100%; object-fit: cover;}""") as demo:
- gr.HTML(value="top of page", elem_id="top",visible=False)
- gr.HTML("""
-
-
- Using Gradio API - 2
-
- Stream < href="https://huggingface.co/collections/Dagfinn1962/images-64fc02ca304b8cb412ccda28" target="_blank">Collection Images ina beautiful grid
")
-
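-    # NOTE (assumption, not in the original file): `matting_state` and `color_state`
-    # are referenced by the event handlers below but never defined in this snippet;
-    # they are presumably gr.State components. Plausible defaults matching the
-    # slider and checkbox defaults:
-    matting_state = gr.State((270, 20, 11))
-    color_state = gr.State(False)
-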
- with gr.Row(equal_height=False):
- with gr.Column():
- input_img = gr.Image(type="pil", label="Input image")
- drp_models = gr.Dropdown(choices=model_choices, label="Model Segment", value="TracerUniversalB7")
-
- with gr.Row():
- chk_include_matting = gr.Checkbox(label="Matting", value=False)
- chk_smoot_mask = gr.Checkbox(label="Smoot Mask", value=False)
- chk_show_mask = gr.Checkbox(label="Show Mask", value=False)
- with gr.Box(visible=False) as slider_matting:
- slr_fg_threshold = gr.Slider(0, 300, value=270, step=1, label="Alpha matting foreground threshold")
- slr_bg_threshold = gr.Slider(0, 50, value=20, step=1, label="Alpha matting background threshold")
- slr_erode_size = gr.Slider(0, 20, value=11, step=1, label="Alpha matting erode size")
- with gr.Box():
- with gr.Row():
- chk_change_color = gr.Checkbox(label="Change background color", value=False)
- pkr_color = gr.ColorPicker(label="Pick a new color", visible=False)
- chk_dominant = gr.Checkbox(label="Use dominant color", value=False, visible=False)
-
- #######################
- ############################
- #############################
- run_btn = gr.Button(value="Remove background", variant="primary")
-
- with gr.Column():
- output_img = gr.Image(type="pil", label="Image Result")
- mask_img = gr.Image(type="pil", label="Image Mask", visible=False)
- gr.ClearButton(components=[input_img, output_img, mask_img])
-
- chk_include_matting.change(change_include_matting, inputs=[chk_include_matting],
- outputs=[slider_matting, matting_state,
- slr_fg_threshold, slr_bg_threshold, slr_erode_size])
-
- slr_bg_threshold.change(change_background_threshold, inputs=[slr_bg_threshold, matting_state],
- outputs=[matting_state])
-
- slr_fg_threshold.change(change_foreground_threshold, inputs=[slr_fg_threshold, matting_state],
- outputs=[matting_state])
-
- slr_erode_size.change(change_erode_size, inputs=[slr_erode_size, matting_state],
- outputs=[matting_state])
-
- chk_show_mask.change(change_show_mask, inputs=[chk_show_mask], outputs=[mask_img])
-
- chk_change_color.change(change_background_mode, inputs=[chk_change_color],
- outputs=[pkr_color, chk_dominant])
-
- pkr_color.change(change_picker_color, inputs=[pkr_color, chk_dominant], outputs=[color_state])
-
- chk_dominant.change(set_dominant_color, inputs=[chk_dominant], outputs=[color_state, pkr_color])
-
- run_btn.click(predict, inputs=[input_img, drp_models, chk_smoot_mask, matting_state, color_state],
- outputs=[output_img, mask_img])
-
-
-
-# text_input = gr.Textbox() ## Diffuser
-# image_output = gr.Image()
-# image_button = gr.Button("Flip")
-
-
-
- # text_button.click(flip_text, inputs=text_input, outputs=text_output)
- # image_button.click(flip_image, inputs=image_input, outputs=image_output)
-pan.queue(concurrency_count=200)
-pan.launch(inline=True, show_api=True, max_threads=400 )
diff --git a/spaces/DaleChen/AutoGPT/run.bat b/spaces/DaleChen/AutoGPT/run.bat
deleted file mode 100644
index afbab57a0603a126b04845ec754d1ecf3fdea18d..0000000000000000000000000000000000000000
--- a/spaces/DaleChen/AutoGPT/run.bat
+++ /dev/null
@@ -1,8 +0,0 @@
-@echo off
-python scripts/check_requirements.py requirements.txt
-if errorlevel 1 (
- echo Installing missing packages...
- pip install -r requirements.txt
-)
-python -m autogpt %*
-pause
diff --git a/spaces/Daniton/superjourney/app.py b/spaces/Daniton/superjourney/app.py
deleted file mode 100644
index 2193905172b6fb6d868bff88cc8311f491ec13b3..0000000000000000000000000000000000000000
--- a/spaces/Daniton/superjourney/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/prompthero/openjourney").launch()
\ No newline at end of file
diff --git a/spaces/Dantra1/CeliaSensei/text/__init__.py b/spaces/Dantra1/CeliaSensei/text/__init__.py
deleted file mode 100644
index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000
--- a/spaces/Dantra1/CeliaSensei/text/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- cleaner_names: names of the cleaner functions to run the text through
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence, clean_text
-
-
-def cleaned_text_to_sequence(cleaned_text):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
- text: string to convert to a sequence
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
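-
-
-# Example (illustrative; the cleaner name is an assumption taken from typical configs):
-#   sequence, clean_text = text_to_sequence("Hello!", symbols, ["english_cleaners"])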
diff --git a/spaces/Dao3/DreamlikeArt-Diffusion-1.0/README.md b/spaces/Dao3/DreamlikeArt-Diffusion-1.0/README.md
deleted file mode 100644
index 43bb4c92438a78f83eca8f7f06051c81b01bb4ce..0000000000000000000000000000000000000000
--- a/spaces/Dao3/DreamlikeArt-Diffusion-1.0/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-title: DreamlikeArt-Diffusion 1.0
-emoji: 🧘🏻♂️
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.16.1
-app_file: app.py
-pinned: false
-duplicated_from: phenomenon1981/DreamlikeArt-Diffusion-1.0
----
----
\ No newline at end of file
diff --git a/spaces/Dragonnext/Drago-Proxy/greeting.md b/spaces/Dragonnext/Drago-Proxy/greeting.md
deleted file mode 100644
index 56bf37f8d86ab390933477c5cb1741c78393b180..0000000000000000000000000000000000000000
--- a/spaces/Dragonnext/Drago-Proxy/greeting.md
+++ /dev/null
@@ -1,11 +0,0 @@
-**THIS PROXY IS PRIVATE, USED ONLY BY ME TO TEST KEYS OR COOM MYSELF. USE THE UNICORN (TURBO) ONE INSTEAD (*THIS ONE WILL NEVER BE PUBLIC*)**
-
- https://huggingface.co/spaces/Dragonnext/Unicorn-proxy
-
-Contact me:
-contactdrago@proton.me
-
-My private bots, no promises of good results (feel free to share the rentry):
-https://rentry.co/dragobots
-
-
diff --git a/spaces/ECCV2022/bytetrack/tutorials/centertrack/README.md b/spaces/ECCV2022/bytetrack/tutorials/centertrack/README.md
deleted file mode 100644
index b46bb2f0412c260c53d90bb5f8e5f2c387f748a5..0000000000000000000000000000000000000000
--- a/spaces/ECCV2022/bytetrack/tutorials/centertrack/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# CenterTrack
-
-Step1. git clone https://github.com/xingyizhou/CenterTrack.git
-
-
-Step2.
-
-replace https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/utils/tracker.py
-
-replace https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/opts.py
-
-
-Step3. run
-```
-python3 test.py tracking --exp_id mot17_half --dataset mot --dataset_version 17halfval --pre_hm --ltrb_amodal --load_model ../models/mot17_half.pth --track_thresh 0.4 --new_thresh 0.5 --out_thresh 0.2 --pre_thresh 0.5
-```
-
-
-# CenterTrack_BYTE
-
-Step1. git clone https://github.com/xingyizhou/CenterTrack.git
-
-
-Step2.
-
-replace https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/utils/tracker.py with byte_tracker.py
-
-replace https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/opts.py
-
-add mot_online to https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/utils
-
-Step3. run
-```
-python3 test.py tracking --exp_id mot17_half --dataset mot --dataset_version 17halfval --pre_hm --ltrb_amodal --load_model ../models/mot17_half.pth --track_thresh 0.4 --new_thresh 0.5 --out_thresh 0.2 --pre_thresh 0.5
-```
-
-
-## Notes
-tracker.py: motion only
-
-byte_tracker.py: motion with a Kalman filter
-
diff --git a/spaces/EPFL-VILAB/MultiMAE/utils/layers/helpers.py b/spaces/EPFL-VILAB/MultiMAE/utils/layers/helpers.py
deleted file mode 100644
index e28234052d6b3c36845bd51e33de9b5855776877..0000000000000000000000000000000000000000
--- a/spaces/EPFL-VILAB/MultiMAE/utils/layers/helpers.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# --------------------------------------------------------
-# Based on timm and MAE-priv code bases
-# https://github.com/rwightman/pytorch-image-models/tree/master/timm
-# https://github.com/BUPT-PRIV/MAE-priv
-# --------------------------------------------------------
-
-""" Layer/Module Helpers
-
-Hacked together by / Copyright 2020 Ross Wightman
-"""
-import collections.abc
-from itertools import repeat
-
-
-# From PyTorch internals
-def _ntuple(n):
- def parse(x):
- if isinstance(x, collections.abc.Iterable):
- return x
- return tuple(repeat(x, n))
-
- return parse
-
-
-to_1tuple = _ntuple(1)
-to_2tuple = _ntuple(2)
-to_3tuple = _ntuple(3)
-to_4tuple = _ntuple(4)
-to_ntuple = _ntuple
-
-
-def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
- min_value = min_value or divisor
- new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
- # Make sure that round down does not go down by more than 10%.
- if new_v < round_limit * v:
- new_v += divisor
- return new_v
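-
-
-# Quick sanity examples (not part of the original file):
-#   to_2tuple(3)       -> (3, 3)
-#   make_divisible(30) -> 32   # rounded up to the nearest multiple of 8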
diff --git a/spaces/Eunice0120/text_generator/README.md b/spaces/Eunice0120/text_generator/README.md
deleted file mode 100644
index 99d8c0b984f1a2cb66e08623dd9c3fd0a847375c..0000000000000000000000000000000000000000
--- a/spaces/Eunice0120/text_generator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Text Generator
-emoji: 🏃
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/EuroPython2022/Warehouse_Apparel_Detection/templates/index.html b/spaces/EuroPython2022/Warehouse_Apparel_Detection/templates/index.html
deleted file mode 100644
index 9d60b551ef40b9b33e45c9e0c10dc32f005d41e0..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/Warehouse_Apparel_Detection/templates/index.html
+++ /dev/null
@@ -1,351 +0,0 @@
-<!-- The markup of this 351-line HTML template was stripped in this dump; the
-     recoverable content is the page title "iNeuron", the heading "Warehouse
-     Apparel Detection using YOLOv5", and a "Prediction Results" section. -->
\ No newline at end of file
diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/crnn/README.md b/spaces/EuroPython2022/mmocr-demo/configs/textrecog/crnn/README.md
deleted file mode 100644
index 52232587e512eb53f16e652e3f3afd0a53686faf..0000000000000000000000000000000000000000
--- a/spaces/EuroPython2022/mmocr-demo/configs/textrecog/crnn/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# CRNN
-
-> [An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition](https://arxiv.org/abs/1507.05717)
-
-
-
-## Abstract
-
-Image-based sequence recognition has been a long-standing research topic in computer vision. In this paper, we investigate the problem of scene text recognition, which is among the most important and challenging tasks in image-based sequence recognition. A novel neural network architecture, which integrates feature extraction, sequence modeling and transcription into a unified framework, is proposed. Compared with previous systems for scene text recognition, the proposed architecture possesses four distinctive properties: (1) It is end-to-end trainable, in contrast to most of the existing algorithms whose components are separately trained and tuned. (2) It naturally handles sequences in arbitrary lengths, involving no character segmentation or horizontal scale normalization. (3) It is not confined to any predefined lexicon and achieves remarkable performances in both lexicon-free and lexicon-based scene text recognition tasks. (4) It generates an effective yet much smaller model, which is more practical for real-world application scenarios. The experiments on standard benchmarks, including the IIIT-5K, Street View Text and ICDAR datasets, demonstrate the superiority of the proposed algorithm over the prior arts. Moreover, the proposed algorithm performs well in the task of image-based music score recognition, which evidently verifies the generality of it.
-
-
-
-
-
-## Dataset
-
-### Train Dataset
-
-| trainset | instance_num | repeat_num | note |
-| :------: | :----------: | :--------: | :---: |
-| Syn90k | 8919273 | 1 | synth |
-
-### Test Dataset
-
-| testset | instance_num | note |
-| :-----: | :----------: | :-------: |
-| IIIT5K | 3000 | regular |
-| SVT | 647 | regular |
-| IC13 | 1015 | regular |
-| IC15 | 2077 | irregular |
-| SVTP | 645 | irregular |
-| CT80 | 288 | irregular |
-
-## Results and models
-
-| Methods | IIIT5K | SVT | IC13 | IC15 | SVTP | CT80 | download |
-| :-------------------------------------------------------: | :----: | :--: | :--: | :--: | :--: | :--: | :------------------------------------------------------------------------------------------------: |
-| [CRNN](/configs/textrecog/crnn/crnn_academic_dataset.py) | 80.5 | 81.5 | 86.5 | 54.1 | 59.1 | 55.6 | [model](https://download.openmmlab.com/mmocr/textrecog/crnn/crnn_academic-a723a1c5.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/crnn/20210326_111035.log.json) |
-
-## Citation
-
-```bibtex
-@article{shi2016end,
- title={An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition},
- author={Shi, Baoguang and Bai, Xiang and Yao, Cong},
- journal={IEEE transactions on pattern analysis and machine intelligence},
- year={2016}
-}
-```
diff --git a/spaces/FFusion/FFXL-SDXL-Convert-diffusers/convert.py b/spaces/FFusion/FFXL-SDXL-Convert-diffusers/convert.py
deleted file mode 100644
index 6f4877ae1204be1ad3142bf583ff0e24eac88b7e..0000000000000000000000000000000000000000
--- a/spaces/FFusion/FFXL-SDXL-Convert-diffusers/convert.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import gradio as gr
-import requests
-import os
-import shutil
-from pathlib import Path
-from typing import Any
-from tempfile import TemporaryDirectory
-from typing import Optional
-
-import torch
-from io import BytesIO
-
-from huggingface_hub import CommitInfo, Discussion, HfApi, hf_hub_download
-from huggingface_hub.file_download import repo_folder_name
-from diffusers import StableDiffusionXLPipeline
-from transformers import CONFIG_MAPPING
-
-
-COMMIT_MESSAGE = " This PR adds fp32 and fp16 weights in safetensors format to {}"
-
-
-def convert_single(model_id: str, filename: str, folder: str, progress: Any, token: str):
- progress(0, desc="Downloading model")
- local_file = os.path.join(model_id, filename)
- ckpt_file = local_file if os.path.isfile(local_file) else hf_hub_download(repo_id=model_id, filename=filename, token=token)
-
- pipeline = StableDiffusionXLPipeline.from_single_file(ckpt_file)
-
- pipeline.save_pretrained(folder, safe_serialization=True)
- pipeline = pipeline.to(torch_dtype=torch.float16)
- pipeline.save_pretrained(folder, safe_serialization=True, variant="fp16")
-
- return folder
-
-
-def previous_pr(api: "HfApi", model_id: str, pr_title: str) -> Optional["Discussion"]:
- try:
- discussions = api.get_repo_discussions(repo_id=model_id)
- except Exception:
- return None
- for discussion in discussions:
- if discussion.status == "open" and discussion.is_pull_request and discussion.title == pr_title:
- details = api.get_discussion_details(repo_id=model_id, discussion_num=discussion.num)
- if details.target_branch == "refs/heads/main":
- return discussion
-
-
-def convert(token: str, model_id: str, filename: str, progress=gr.Progress()):
- api = HfApi()
-
- pr_title = "Adding `diffusers` weights of this model"
-
- with TemporaryDirectory() as d:
- folder = os.path.join(d, repo_folder_name(repo_id=model_id, repo_type="models"))
- os.makedirs(folder)
- new_pr = None
- try:
- folder = convert_single(model_id, filename, folder, progress, token)
- progress(0.7, desc="Uploading to Hub")
- new_pr = api.upload_folder(folder_path=folder, path_in_repo="./", repo_id=model_id, repo_type="model", token=token, commit_message=pr_title, commit_description=COMMIT_MESSAGE.format(model_id), create_pr=True)
- pr_number = new_pr.split("%2F")[-1].split("/")[0]
- link = f"Pr created at: {'https://huggingface.co/' + os.path.join(model_id, 'discussions', pr_number)}"
- progress(1, desc="Done")
- except Exception as e:
- raise gr.exceptions.Error(str(e))
- finally:
- shutil.rmtree(folder)
-
- return link
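The Space's UI itself is not part of this diff; as a rough sketch only (component labels and the `from convert import convert` layout are assumptions, not the Space's actual `app.py`), `convert` could be wired to a Gradio interface like this:

```python
# Hypothetical UI wiring for the convert() helper above; the real app.py may differ.
import gradio as gr

from convert import convert  # assumes this module sits next to app.py

demo = gr.Interface(
    fn=convert,
    inputs=[
        gr.Textbox(label="Hugging Face write token", type="password"),
        gr.Textbox(label="Model repo id, e.g. user/my-sdxl-model"),
        gr.Textbox(label="Checkpoint filename, e.g. model.safetensors"),
    ],
    outputs=gr.Markdown(label="Result"),
)

if __name__ == "__main__":
    demo.queue().launch()
```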
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/archs/vqgan_arch.py b/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/archs/vqgan_arch.py
deleted file mode 100644
index f6dfcf4c9983b431f0a978701e5ddd9598faf381..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/basicsr/archs/vqgan_arch.py
+++ /dev/null
@@ -1,435 +0,0 @@
-'''
-VQGAN code, adapted from the original created by the Unleashing Transformers authors:
-https://github.com/samb-t/unleashing-transformers/blob/master/models/vqgan.py
-
-'''
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import copy
-from basicsr.utils import get_root_logger
-from basicsr.utils.registry import ARCH_REGISTRY
-
-def normalize(in_channels):
- return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
-
-
-@torch.jit.script
-def swish(x):
- return x*torch.sigmoid(x)
-
-
-# Define VQVAE classes
-class VectorQuantizer(nn.Module):
- def __init__(self, codebook_size, emb_dim, beta):
- super(VectorQuantizer, self).__init__()
- self.codebook_size = codebook_size # number of embeddings
- self.emb_dim = emb_dim # dimension of embedding
- self.beta = beta # commitment cost used in loss term, beta * ||z_e(x)-sg[e]||^2
- self.embedding = nn.Embedding(self.codebook_size, self.emb_dim)
- self.embedding.weight.data.uniform_(-1.0 / self.codebook_size, 1.0 / self.codebook_size)
-
- def forward(self, z):
- # reshape z -> (batch, height, width, channel) and flatten
- z = z.permute(0, 2, 3, 1).contiguous()
- z_flattened = z.view(-1, self.emb_dim)
-
- # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
- d = (z_flattened ** 2).sum(dim=1, keepdim=True) + (self.embedding.weight**2).sum(1) - \
- 2 * torch.matmul(z_flattened, self.embedding.weight.t())
-
- mean_distance = torch.mean(d)
- # find closest encodings
- # min_encoding_indices = torch.argmin(d, dim=1).unsqueeze(1)
- min_encoding_scores, min_encoding_indices = torch.topk(d, 1, dim=1, largest=False)
- # [0-1], higher score, higher confidence
- min_encoding_scores = torch.exp(-min_encoding_scores/10)
-
- min_encodings = torch.zeros(min_encoding_indices.shape[0], self.codebook_size).to(z)
- min_encodings.scatter_(1, min_encoding_indices, 1)
-
- # get quantized latent vectors
- z_q = torch.matmul(min_encodings, self.embedding.weight).view(z.shape)
- # compute loss for embedding
- loss = torch.mean((z_q.detach()-z)**2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
- # preserve gradients
- z_q = z + (z_q - z).detach()
-
- # perplexity
- e_mean = torch.mean(min_encodings, dim=0)
- perplexity = torch.exp(-torch.sum(e_mean * torch.log(e_mean + 1e-10)))
- # reshape back to match original input shape
- z_q = z_q.permute(0, 3, 1, 2).contiguous()
-
- return z_q, loss, {
- "perplexity": perplexity,
- "min_encodings": min_encodings,
- "min_encoding_indices": min_encoding_indices,
- "min_encoding_scores": min_encoding_scores,
- "mean_distance": mean_distance
- }
-
- def get_codebook_feat(self, indices, shape):
- # input indices: batch*token_num -> (batch*token_num)*1
- # shape: batch, height, width, channel
- indices = indices.view(-1,1)
- min_encodings = torch.zeros(indices.shape[0], self.codebook_size).to(indices)
- min_encodings.scatter_(1, indices, 1)
- # get quantized latent vectors
- z_q = torch.matmul(min_encodings.float(), self.embedding.weight)
-
- if shape is not None: # reshape back to match original input shape
- z_q = z_q.view(shape).permute(0, 3, 1, 2).contiguous()
-
- return z_q
-
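A quick shape check for the nearest-neighbour quantizer above (assumes this file is importable as `basicsr.archs.vqgan_arch`, as its path suggests):

```python
# Minimal shape check for VectorQuantizer: quantize a fake encoder output.
import torch
from basicsr.archs.vqgan_arch import VectorQuantizer

quant = VectorQuantizer(codebook_size=1024, emb_dim=256, beta=0.25)
z = torch.randn(2, 256, 16, 16)          # encoder output in (B, C, H, W)
z_q, loss, stats = quant(z)

print(z_q.shape)                          # torch.Size([2, 256, 16, 16])
print(loss.item())                        # commitment + codebook loss (scalar)
print(stats["min_encoding_indices"].shape)  # one codebook index per spatial position
```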
-
-class GumbelQuantizer(nn.Module):
- def __init__(self, codebook_size, emb_dim, num_hiddens, straight_through=False, kl_weight=5e-4, temp_init=1.0):
- super().__init__()
- self.codebook_size = codebook_size # number of embeddings
- self.emb_dim = emb_dim # dimension of embedding
- self.straight_through = straight_through
- self.temperature = temp_init
- self.kl_weight = kl_weight
- self.proj = nn.Conv2d(num_hiddens, codebook_size, 1) # projects last encoder layer to quantized logits
- self.embed = nn.Embedding(codebook_size, emb_dim)
-
- def forward(self, z):
- hard = self.straight_through if self.training else True
-
- logits = self.proj(z)
-
- soft_one_hot = F.gumbel_softmax(logits, tau=self.temperature, dim=1, hard=hard)
-
- z_q = torch.einsum("b n h w, n d -> b d h w", soft_one_hot, self.embed.weight)
-
- # + kl divergence to the prior loss
- qy = F.softmax(logits, dim=1)
- diff = self.kl_weight * torch.sum(qy * torch.log(qy * self.codebook_size + 1e-10), dim=1).mean()
- min_encoding_indices = soft_one_hot.argmax(dim=1)
-
- return z_q, diff, {
- "min_encoding_indices": min_encoding_indices
- }
-
-
-class Downsample(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=2, padding=0)
-
- def forward(self, x):
- pad = (0, 1, 0, 1)
- x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
- x = self.conv(x)
- return x
-
-
-class Upsample(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
-
- def forward(self, x):
- x = F.interpolate(x, scale_factor=2.0, mode="nearest")
- x = self.conv(x)
-
- return x
-
-
-class ResBlock(nn.Module):
- def __init__(self, in_channels, out_channels=None):
- super(ResBlock, self).__init__()
- self.in_channels = in_channels
- self.out_channels = in_channels if out_channels is None else out_channels
- self.norm1 = normalize(in_channels)
- self.conv1 = nn.Conv2d(in_channels, self.out_channels, kernel_size=3, stride=1, padding=1)
- self.norm2 = normalize(self.out_channels)
- self.conv2 = nn.Conv2d(self.out_channels, self.out_channels, kernel_size=3, stride=1, padding=1)
- if self.in_channels != self.out_channels:
- self.conv_out = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def forward(self, x_in):
- x = x_in
- x = self.norm1(x)
- x = swish(x)
- x = self.conv1(x)
- x = self.norm2(x)
- x = swish(x)
- x = self.conv2(x)
- if self.in_channels != self.out_channels:
- x_in = self.conv_out(x_in)
-
- return x + x_in
-
-
-class AttnBlock(nn.Module):
- def __init__(self, in_channels):
- super().__init__()
- self.in_channels = in_channels
-
- self.norm = normalize(in_channels)
- self.q = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
- self.k = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
- self.v = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
- self.proj_out = torch.nn.Conv2d(
- in_channels,
- in_channels,
- kernel_size=1,
- stride=1,
- padding=0
- )
-
- def forward(self, x):
- h_ = x
- h_ = self.norm(h_)
- q = self.q(h_)
- k = self.k(h_)
- v = self.v(h_)
-
- # compute attention
- b, c, h, w = q.shape
- q = q.reshape(b, c, h*w)
- q = q.permute(0, 2, 1)
- k = k.reshape(b, c, h*w)
- w_ = torch.bmm(q, k)
- w_ = w_ * (int(c)**(-0.5))
- w_ = F.softmax(w_, dim=2)
-
- # attend to values
- v = v.reshape(b, c, h*w)
- w_ = w_.permute(0, 2, 1)
- h_ = torch.bmm(v, w_)
- h_ = h_.reshape(b, c, h, w)
-
- h_ = self.proj_out(h_)
-
- return x+h_
-
-
-class Encoder(nn.Module):
- def __init__(self, in_channels, nf, emb_dim, ch_mult, num_res_blocks, resolution, attn_resolutions):
- super().__init__()
- self.nf = nf
- self.num_resolutions = len(ch_mult)
- self.num_res_blocks = num_res_blocks
- self.resolution = resolution
- self.attn_resolutions = attn_resolutions
-
- curr_res = self.resolution
- in_ch_mult = (1,)+tuple(ch_mult)
-
- blocks = []
- # initial convolution
- blocks.append(nn.Conv2d(in_channels, nf, kernel_size=3, stride=1, padding=1))
-
- # residual and downsampling blocks, with attention on smaller res (16x16)
- for i in range(self.num_resolutions):
- block_in_ch = nf * in_ch_mult[i]
- block_out_ch = nf * ch_mult[i]
- for _ in range(self.num_res_blocks):
- blocks.append(ResBlock(block_in_ch, block_out_ch))
- block_in_ch = block_out_ch
- if curr_res in attn_resolutions:
- blocks.append(AttnBlock(block_in_ch))
-
- if i != self.num_resolutions - 1:
- blocks.append(Downsample(block_in_ch))
- curr_res = curr_res // 2
-
- # non-local attention block
- blocks.append(ResBlock(block_in_ch, block_in_ch))
- blocks.append(AttnBlock(block_in_ch))
- blocks.append(ResBlock(block_in_ch, block_in_ch))
-
- # normalise and convert to latent size
- blocks.append(normalize(block_in_ch))
- blocks.append(nn.Conv2d(block_in_ch, emb_dim, kernel_size=3, stride=1, padding=1))
- self.blocks = nn.ModuleList(blocks)
-
- def forward(self, x):
- for block in self.blocks:
- x = block(x)
-
- return x
-
-
-class Generator(nn.Module):
- def __init__(self, nf, emb_dim, ch_mult, res_blocks, img_size, attn_resolutions):
- super().__init__()
- self.nf = nf
- self.ch_mult = ch_mult
- self.num_resolutions = len(self.ch_mult)
- self.num_res_blocks = res_blocks
- self.resolution = img_size
- self.attn_resolutions = attn_resolutions
- self.in_channels = emb_dim
- self.out_channels = 3
- block_in_ch = self.nf * self.ch_mult[-1]
- curr_res = self.resolution // 2 ** (self.num_resolutions-1)
-
- blocks = []
- # initial conv
- blocks.append(nn.Conv2d(self.in_channels, block_in_ch, kernel_size=3, stride=1, padding=1))
-
- # non-local attention block
- blocks.append(ResBlock(block_in_ch, block_in_ch))
- blocks.append(AttnBlock(block_in_ch))
- blocks.append(ResBlock(block_in_ch, block_in_ch))
-
- for i in reversed(range(self.num_resolutions)):
- block_out_ch = self.nf * self.ch_mult[i]
-
- for _ in range(self.num_res_blocks):
- blocks.append(ResBlock(block_in_ch, block_out_ch))
- block_in_ch = block_out_ch
-
- if curr_res in self.attn_resolutions:
- blocks.append(AttnBlock(block_in_ch))
-
- if i != 0:
- blocks.append(Upsample(block_in_ch))
- curr_res = curr_res * 2
-
- blocks.append(normalize(block_in_ch))
- blocks.append(nn.Conv2d(block_in_ch, self.out_channels, kernel_size=3, stride=1, padding=1))
-
- self.blocks = nn.ModuleList(blocks)
-
-
- def forward(self, x):
- for block in self.blocks:
- x = block(x)
-
- return x
-
-
-@ARCH_REGISTRY.register()
-class VQAutoEncoder(nn.Module):
- def __init__(self, img_size, nf, ch_mult, quantizer="nearest", res_blocks=2, attn_resolutions=[16], codebook_size=1024, emb_dim=256,
- beta=0.25, gumbel_straight_through=False, gumbel_kl_weight=1e-8, model_path=None):
- super().__init__()
- logger = get_root_logger()
- self.in_channels = 3
- self.nf = nf
- self.n_blocks = res_blocks
- self.codebook_size = codebook_size
- self.embed_dim = emb_dim
- self.ch_mult = ch_mult
- self.resolution = img_size
- self.attn_resolutions = attn_resolutions
- self.quantizer_type = quantizer
- self.encoder = Encoder(
- self.in_channels,
- self.nf,
- self.embed_dim,
- self.ch_mult,
- self.n_blocks,
- self.resolution,
- self.attn_resolutions
- )
- if self.quantizer_type == "nearest":
- self.beta = beta #0.25
- self.quantize = VectorQuantizer(self.codebook_size, self.embed_dim, self.beta)
- elif self.quantizer_type == "gumbel":
- self.gumbel_num_hiddens = emb_dim
- self.straight_through = gumbel_straight_through
- self.kl_weight = gumbel_kl_weight
- self.quantize = GumbelQuantizer(
- self.codebook_size,
- self.embed_dim,
- self.gumbel_num_hiddens,
- self.straight_through,
- self.kl_weight
- )
- self.generator = Generator(
- self.nf,
- self.embed_dim,
- self.ch_mult,
- self.n_blocks,
- self.resolution,
- self.attn_resolutions
- )
-
- if model_path is not None:
- chkpt = torch.load(model_path, map_location='cpu')
- if 'params_ema' in chkpt:
- self.load_state_dict(chkpt['params_ema'])
- logger.info(f'vqgan is loaded from: {model_path} [params_ema]')
- elif 'params' in chkpt:
- self.load_state_dict(chkpt['params'])
- logger.info(f'vqgan is loaded from: {model_path} [params]')
- else:
- raise ValueError(f'Unexpected checkpoint format in {model_path}: expected a "params_ema" or "params" key.')
-
-
- def forward(self, x):
- x = self.encoder(x)
- quant, codebook_loss, quant_stats = self.quantize(x)
- x = self.generator(quant)
- return x, codebook_loss, quant_stats
-
-
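Putting the encoder, quantizer and generator together, here is a small round-trip sketch; the hyperparameters below are illustrative only, not the released CodeFormer settings:

```python
# Illustrative end-to-end pass through VQAutoEncoder with made-up hyperparameters.
import torch
from basicsr.archs.vqgan_arch import VQAutoEncoder

model = VQAutoEncoder(
    img_size=64, nf=32, ch_mult=[1, 2, 4],
    quantizer="nearest", res_blocks=1, attn_resolutions=[16],
    codebook_size=512, emb_dim=64,
)
x = torch.randn(1, 3, 64, 64)                       # fake RGB batch
recon, codebook_loss, stats = model(x)

print(recon.shape)                                  # torch.Size([1, 3, 64, 64])
print(codebook_loss.item(), stats["perplexity"].item())
```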
-
-# patch based discriminator
-@ARCH_REGISTRY.register()
-class VQGANDiscriminator(nn.Module):
- def __init__(self, nc=3, ndf=64, n_layers=4, model_path=None):
- super().__init__()
-
- layers = [nn.Conv2d(nc, ndf, kernel_size=4, stride=2, padding=1), nn.LeakyReLU(0.2, True)]
- ndf_mult = 1
- ndf_mult_prev = 1
- for n in range(1, n_layers): # gradually increase the number of filters
- ndf_mult_prev = ndf_mult
- ndf_mult = min(2 ** n, 8)
- layers += [
- nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=2, padding=1, bias=False),
- nn.BatchNorm2d(ndf * ndf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- ndf_mult_prev = ndf_mult
- ndf_mult = min(2 ** n_layers, 8)
-
- layers += [
- nn.Conv2d(ndf * ndf_mult_prev, ndf * ndf_mult, kernel_size=4, stride=1, padding=1, bias=False),
- nn.BatchNorm2d(ndf * ndf_mult),
- nn.LeakyReLU(0.2, True)
- ]
-
- layers += [
- nn.Conv2d(ndf * ndf_mult, 1, kernel_size=4, stride=1, padding=1)] # output 1 channel prediction map
- self.main = nn.Sequential(*layers)
-
- if model_path is not None:
- chkpt = torch.load(model_path, map_location='cpu')
- if 'params_d' in chkpt:
- self.load_state_dict(chkpt['params_d'])
- elif 'params' in chkpt:
- self.load_state_dict(chkpt['params'])
- else:
- raise ValueError(f'Unexpected checkpoint format in {model_path}: expected a "params_d" or "params" key.')
-
- def forward(self, x):
- return self.main(x)
\ No newline at end of file
diff --git a/spaces/Felladrin/Web-LLM-Mistral-7B-OpenOrca/dist/index.9744ff88.css b/spaces/Felladrin/Web-LLM-Mistral-7B-OpenOrca/dist/index.9744ff88.css
deleted file mode 100644
index 7f27ffbb5e0ba8eded3d8e1429eb083ae4551df1..0000000000000000000000000000000000000000
--- a/spaces/Felladrin/Web-LLM-Mistral-7B-OpenOrca/dist/index.9744ff88.css
+++ /dev/null
@@ -1 +0,0 @@
-.chatui{border:2px solid #ddd;border-radius:5px;flex-flow:column wrap;justify-content:space-between;width:100%;max-width:867px;height:600px;margin:25px 10px;display:flex;box-shadow:0 15px 15px -5px #0003}.chatui-header{color:#666;background:#eee;border-bottom:2px solid #ddd;justify-content:space-between;padding:10px;display:flex}.chatui-chat{flex:1;padding:10px;overflow-y:auto}.chatui-chat::-webkit-scrollbar{width:6px}.chatui-chat::-webkit-scrollbar-track{background:#ddd}.chatui-chat::-webkit-scrollbar-thumb{background:#bdbdbd}.msg{align-items:flex-end;margin-bottom:10px;display:flex}.msg:last-of-type{margin:0}.msg-bubble{background:#ececec;border-radius:15px;max-width:450px;padding:15px}.left-msg .msg-bubble{border-bottom-left-radius:0}.error-msg .msg-bubble{color:#f15959;border-bottom-left-radius:0}.init-msg .msg-bubble{border-bottom-left-radius:0}.right-msg{flex-direction:row-reverse}.right-msg .msg-bubble{color:#fff;background:#579ffb;border-bottom-right-radius:0}.chatui-inputarea{background:#eee;border-top:2px solid #ddd;padding:10px;display:flex}.chatui-inputarea *{border:none;border-radius:3px;padding:10px;font-size:1em}.chatui-input{background:#ddd;flex:1}.chatui-reset-btn{cursor:pointer;background:#ececec;border-radius:8px;width:200px;margin-left:10px;font-weight:700}.chatui-reset-btn:hover{background:#dcdada}.chatui-send-btn{color:#fff;cursor:pointer;background:#579ffb;margin-left:10px;font-weight:700}.chatui-send-btn:hover{background:#577bfb}.chatui-chat{background-color:#fcfcfe}
\ No newline at end of file
diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/pretrain/meta.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/pretrain/meta.py
deleted file mode 100644
index cc35dd3c0dfe8436e7d635f2db507cedca75ed49..0000000000000000000000000000000000000000
--- a/spaces/FrankZxShen/so-vits-svc-models-pcr/pretrain/meta.py
+++ /dev/null
@@ -1,31 +0,0 @@
-def download_dict():
- return {
- "vec768l12": {
- "url": "https://ibm.ent.box.com/shared/static/z1wgl1stco8ffooyatzdwsqn2psd9lrr",
- "output": "./pretrain/checkpoint_best_legacy_500.pt"
- },
- "vec256l9": {
- "url": "https://ibm.ent.box.com/shared/static/z1wgl1stco8ffooyatzdwsqn2psd9lrr",
- "output": "./pretrain/checkpoint_best_legacy_500.pt"
- },
- "hubertsoft": {
- "url": "https://github.com/bshall/hubert/releases/download/v0.1/hubert-soft-0d54a1f4.pt",
- "output": "./pretrain/hubert-soft-0d54a1f4.pt"
- },
- "whisper-ppg": {
- "url": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
- "output": "./pretrain/medium.pt"
- }
- }
-
-
-def get_speech_encoder(config_path="configs/config.json"):
- import json
-
- with open(config_path, "r") as f:
- data = f.read()
- config = json.loads(data)
- speech_encoder = config["model"]["speech_encoder"]
- dict = download_dict()
-
- return dict[speech_encoder]["url"], dict[speech_encoder]["output"]
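A rough usage sketch for the helpers above (the download call is an assumption; the repo may fetch checkpoints with its own downloader, and the import assumes the repo root is on `sys.path`):

```python
# Hypothetical fetch of the speech-encoder checkpoint resolved by get_speech_encoder().
import os
import urllib.request

from pretrain.meta import get_speech_encoder  # assumes the repo root is on sys.path

url, output = get_speech_encoder("configs/config.json")
if not os.path.exists(output):
    os.makedirs(os.path.dirname(output), exist_ok=True)
    print(f"downloading speech encoder to {output} ...")
    urllib.request.urlretrieve(url, output)
```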
diff --git a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/utils/numbers.py b/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/utils/numbers.py
deleted file mode 100644
index 75020a0bd732830f603d7c7d250c9e087033cc24..0000000000000000000000000000000000000000
--- a/spaces/GipAdonimus/Real-Time-Voice-Cloning/synthesizer/utils/numbers.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import re
-import inflect
-
-_inflect = inflect.engine()
-_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
-_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)")
-_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)")
-_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)")
-_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)")
-_number_re = re.compile(r"[0-9]+")
-
-
-def _remove_commas(m):
- return m.group(1).replace(",", "")
-
-
-def _expand_decimal_point(m):
- return m.group(1).replace(".", " point ")
-
-
-def _expand_dollars(m):
- match = m.group(1)
- parts = match.split(".")
- if len(parts) > 2:
- return match + " dollars" # Unexpected format
- dollars = int(parts[0]) if parts[0] else 0
- cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
- if dollars and cents:
- dollar_unit = "dollar" if dollars == 1 else "dollars"
- cent_unit = "cent" if cents == 1 else "cents"
- return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
- elif dollars:
- dollar_unit = "dollar" if dollars == 1 else "dollars"
- return "%s %s" % (dollars, dollar_unit)
- elif cents:
- cent_unit = "cent" if cents == 1 else "cents"
- return "%s %s" % (cents, cent_unit)
- else:
- return "zero dollars"
-
-
-def _expand_ordinal(m):
- return _inflect.number_to_words(m.group(0))
-
-
-def _expand_number(m):
- num = int(m.group(0))
- if num > 1000 and num < 3000:
- if num == 2000:
- return "two thousand"
- elif num > 2000 and num < 2010:
- return "two thousand " + _inflect.number_to_words(num % 100)
- elif num % 100 == 0:
- return _inflect.number_to_words(num // 100) + " hundred"
- else:
- return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ")
- else:
- return _inflect.number_to_words(num, andword="")
-
-
-def normalize_numbers(text):
- text = re.sub(_comma_number_re, _remove_commas, text)
- text = re.sub(_pounds_re, r"\1 pounds", text)
- text = re.sub(_dollars_re, _expand_dollars, text)
- text = re.sub(_decimal_number_re, _expand_decimal_point, text)
- text = re.sub(_ordinal_re, _expand_ordinal, text)
- text = re.sub(_number_re, _expand_number, text)
- return text
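The helpers above compose into `normalize_numbers`; a few illustrative calls (module path assumed from this file's location):

```python
# Illustrative text normalization calls; requires the `inflect` package.
from synthesizer.utils.numbers import normalize_numbers

print(normalize_numbers("I paid $2 for 3 apples"))    # -> "I paid two dollars for three apples"
print(normalize_numbers("She finished 1st in 1998"))  # ordinal and year expansion
print(normalize_numbers("about 1,000 people"))        # -> "about one thousand people"
```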
diff --git a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py b/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py
deleted file mode 100644
index 6cea64d39baa7ff4c1e549869aaa4b0ae17779a9..0000000000000000000000000000000000000000
--- a/spaces/Gmq-x/gpt-academic/crazy_functions/test_project/python/dqn/dqn.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
-
-import gym
-import numpy as np
-import torch as th
-from torch.nn import functional as F
-
-from stable_baselines3.common import logger
-from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
-from stable_baselines3.common.preprocessing import maybe_transpose
-from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
-from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
-from stable_baselines3.dqn.policies import DQNPolicy
-
-
-class DQN(OffPolicyAlgorithm):
- """
- Deep Q-Network (DQN)
-
- Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
- Default hyperparameters are taken from the nature paper,
- except for the optimizer and learning rate that were taken from Stable Baselines defaults.
-
- :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
- :param env: The environment to learn from (if registered in Gym, can be str)
- :param learning_rate: The learning rate, it can be a function
- of the current progress remaining (from 1 to 0)
- :param buffer_size: size of the replay buffer
- :param learning_starts: how many steps of the model to collect transitions for before learning starts
- :param batch_size: Minibatch size for each gradient update
- :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
- :param gamma: the discount factor
- :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
- like ``(5, "step")`` or ``(2, "episode")``.
- :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
- Set to ``-1`` means to do as many gradient steps as steps done in the environment
- during the rollout.
- :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
- at a cost of more complexity.
- See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
- :param target_update_interval: update the target network every ``target_update_interval``
- environment steps.
- :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
- :param exploration_initial_eps: initial value of random action probability
- :param exploration_final_eps: final value of random action probability
- :param max_grad_norm: The maximum value for the gradient clipping
- :param tensorboard_log: the log location for tensorboard (if None, no logging)
- :param create_eval_env: Whether to create a second environment that will be
- used for evaluating the agent periodically. (Only available when passing string for the environment)
- :param policy_kwargs: additional arguments to be passed to the policy on creation
- :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
- :param seed: Seed for the pseudo random generators
- :param device: Device (cpu, cuda, ...) on which the code should be run.
- Setting it to auto, the code will be run on the GPU if possible.
- :param _init_setup_model: Whether or not to build the network at the creation of the instance
- """
-
- def __init__(
- self,
- policy: Union[str, Type[DQNPolicy]],
- env: Union[GymEnv, str],
- learning_rate: Union[float, Schedule] = 1e-4,
- buffer_size: int = 1000000,
- learning_starts: int = 50000,
- batch_size: Optional[int] = 32,
- tau: float = 1.0,
- gamma: float = 0.99,
- train_freq: Union[int, Tuple[int, str]] = 4,
- gradient_steps: int = 1,
- optimize_memory_usage: bool = False,
- target_update_interval: int = 10000,
- exploration_fraction: float = 0.1,
- exploration_initial_eps: float = 1.0,
- exploration_final_eps: float = 0.05,
- max_grad_norm: float = 10,
- tensorboard_log: Optional[str] = None,
- create_eval_env: bool = False,
- policy_kwargs: Optional[Dict[str, Any]] = None,
- verbose: int = 0,
- seed: Optional[int] = None,
- device: Union[th.device, str] = "auto",
- _init_setup_model: bool = True,
- ):
-
- super(DQN, self).__init__(
- policy,
- env,
- DQNPolicy,
- learning_rate,
- buffer_size,
- learning_starts,
- batch_size,
- tau,
- gamma,
- train_freq,
- gradient_steps,
- action_noise=None, # No action noise
- policy_kwargs=policy_kwargs,
- tensorboard_log=tensorboard_log,
- verbose=verbose,
- device=device,
- create_eval_env=create_eval_env,
- seed=seed,
- sde_support=False,
- optimize_memory_usage=optimize_memory_usage,
- supported_action_spaces=(gym.spaces.Discrete,),
- )
-
- self.exploration_initial_eps = exploration_initial_eps
- self.exploration_final_eps = exploration_final_eps
- self.exploration_fraction = exploration_fraction
- self.target_update_interval = target_update_interval
- self.max_grad_norm = max_grad_norm
- # "epsilon" for the epsilon-greedy exploration
- self.exploration_rate = 0.0
- # Linear schedule will be defined in `_setup_model()`
- self.exploration_schedule = None
- self.q_net, self.q_net_target = None, None
-
- if _init_setup_model:
- self._setup_model()
-
- def _setup_model(self) -> None:
- super(DQN, self)._setup_model()
- self._create_aliases()
- self.exploration_schedule = get_linear_fn(
- self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
- )
-
- def _create_aliases(self) -> None:
- self.q_net = self.policy.q_net
- self.q_net_target = self.policy.q_net_target
-
- def _on_step(self) -> None:
- """
- Update the exploration rate and target network if needed.
- This method is called in ``collect_rollouts()`` after each step in the environment.
- """
- if self.num_timesteps % self.target_update_interval == 0:
- polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
-
- self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
- logger.record("rollout/exploration rate", self.exploration_rate)
-
- def train(self, gradient_steps: int, batch_size: int = 100) -> None:
- # Update learning rate according to schedule
- self._update_learning_rate(self.policy.optimizer)
-
- losses = []
- for _ in range(gradient_steps):
- # Sample replay buffer
- replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
-
- with th.no_grad():
- # Compute the next Q-values using the target network
- next_q_values = self.q_net_target(replay_data.next_observations)
- # Follow greedy policy: use the one with the highest value
- next_q_values, _ = next_q_values.max(dim=1)
- # Avoid potential broadcast issue
- next_q_values = next_q_values.reshape(-1, 1)
- # 1-step TD target
- target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
-
- # Get current Q-values estimates
- current_q_values = self.q_net(replay_data.observations)
-
- # Retrieve the q-values for the actions from the replay buffer
- current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
-
- # Compute Huber loss (less sensitive to outliers)
- loss = F.smooth_l1_loss(current_q_values, target_q_values)
- losses.append(loss.item())
-
- # Optimize the policy
- self.policy.optimizer.zero_grad()
- loss.backward()
- # Clip gradient norm
- th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
- self.policy.optimizer.step()
-
- # Increase update counter
- self._n_updates += gradient_steps
-
- logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
- logger.record("train/loss", np.mean(losses))
-
- def predict(
- self,
- observation: np.ndarray,
- state: Optional[np.ndarray] = None,
- mask: Optional[np.ndarray] = None,
- deterministic: bool = False,
- ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
- """
- Overrides the base_class predict function to include epsilon-greedy exploration.
-
- :param observation: the input observation
- :param state: The last states (can be None, used in recurrent policies)
- :param mask: The last masks (can be None, used in recurrent policies)
- :param deterministic: Whether or not to return deterministic actions.
- :return: the model's action and the next state
- (used in recurrent policies)
- """
- if not deterministic and np.random.rand() < self.exploration_rate:
- if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
- n_batch = observation.shape[0]
- action = np.array([self.action_space.sample() for _ in range(n_batch)])
- else:
- action = np.array(self.action_space.sample())
- else:
- action, state = self.policy.predict(observation, state, mask, deterministic)
- return action, state
-
- def learn(
- self,
- total_timesteps: int,
- callback: MaybeCallback = None,
- log_interval: int = 4,
- eval_env: Optional[GymEnv] = None,
- eval_freq: int = -1,
- n_eval_episodes: int = 5,
- tb_log_name: str = "DQN",
- eval_log_path: Optional[str] = None,
- reset_num_timesteps: bool = True,
- ) -> OffPolicyAlgorithm:
-
- return super(DQN, self).learn(
- total_timesteps=total_timesteps,
- callback=callback,
- log_interval=log_interval,
- eval_env=eval_env,
- eval_freq=eval_freq,
- n_eval_episodes=n_eval_episodes,
- tb_log_name=tb_log_name,
- eval_log_path=eval_log_path,
- reset_num_timesteps=reset_num_timesteps,
- )
-
- def _excluded_save_params(self) -> List[str]:
- return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
-
- def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
- state_dicts = ["policy", "policy.optimizer"]
-
- return state_dicts, []
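This file mirrors the upstream stable-baselines3 implementation. As a rough sketch of how the class is normally driven, using the released `stable_baselines3` package and CartPole-v1 as a stand-in environment:

```python
# Minimal training sketch for DQN; hyperparameters are illustrative only.
from stable_baselines3 import DQN

model = DQN(
    "MlpPolicy", "CartPole-v1",
    learning_rate=1e-3, buffer_size=50_000, learning_starts=1_000,
    target_update_interval=500, exploration_fraction=0.2, verbose=1,
)
model.learn(total_timesteps=20_000)

obs = model.env.reset()                               # vectorized observation, shape (1, 4)
action, _state = model.predict(obs, deterministic=True)
print(action)
```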
diff --git a/spaces/GodParticle69/minor_demo/mrcnn/visualize.py b/spaces/GodParticle69/minor_demo/mrcnn/visualize.py
deleted file mode 100644
index ebddf729b364657e304af0b66c5d3a44eedd404f..0000000000000000000000000000000000000000
--- a/spaces/GodParticle69/minor_demo/mrcnn/visualize.py
+++ /dev/null
@@ -1,452 +0,0 @@
-"""
-Mask R-CNN
-Display and Visualization Functions.
-
-Copyright (c) 2017 Matterport, Inc.
-Licensed under the MIT License (see LICENSE for details)
-Written by Waleed Abdulla
-"""
-
-import os
-import sys
-import logging
-import random
-import itertools
-import colorsys
-
-import numpy as np
-from skimage.measure import find_contours
-import matplotlib.pyplot as plt
-from matplotlib import patches, lines
-from matplotlib.patches import Polygon
-import IPython.display
-
-# Root directory of the project
-ROOT_DIR = os.path.abspath("../")
-
-# Import Mask RCNN
-sys.path.append(ROOT_DIR) # To find local version of the library
-from mrcnn import utils
-
-
-############################################################
-# Visualization
-############################################################
-
-def display_images(images, titles=None, cols=4, cmap=None, norm=None,
- interpolation=None):
- """Display the given set of images, optionally with titles.
- images: list or array of image tensors in HWC format.
- titles: optional. A list of titles to display with each image.
- cols: number of images per row
- cmap: Optional. Color map to use. For example, "Blues".
- norm: Optional. A Normalize instance to map values to colors.
- interpolation: Optional. Image interpolation to use for display.
- """
- # titles = titles if titles is not None else [""] * len(images)
- # rows = len(images) // cols + 1
- # plt.figure(figsize=(14, 14 * rows // cols))
- # i = 1
- # for image, title in zip(images, titles):
- # plt.subplot(rows, cols, i)
- # plt.title(title, fontsize=9)
- # plt.axis('off')
- # plt.imshow(image.astype(np.uint8), cmap=cmap,
- # norm=norm, interpolation=interpolation)
- # i += 1
- # plt.show()
- pass
-
-
-def random_colors(N, bright=True):
- """
- Generate random colors.
- To get visually distinct colors, generate them in HSV space then
- convert to RGB.
- """
- brightness = 1.0 if bright else 0.7
- hsv = [(i / N, 1, brightness) for i in range(N)]
- colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
- random.shuffle(colors)
- return colors
-
-
-def apply_mask(image, mask, color, alpha=0.5):
- """Apply the given mask to the image.
- """
- for c in range(3):
- image[:, :, c] = np.where(mask == 1,
- image[:, :, c] *
- (1 - alpha) + alpha * color[c] * 255,
- image[:, :, c])
- return image
-
-
-def display_instances(image, boxes, masks, class_ids, class_names,
- scores=None, title="",
- figsize=(16, 16), ax=None):
- """
- boxes: [num_instance, (y1, x1, y2, x2)] in image coordinates.
- masks: [height, width, num_instances]
- class_ids: [num_instances]
- class_names: list of class names of the dataset
- scores: (optional) confidence scores for each box
- figsize: (optional) the size of the image.
- """
- # Number of instances
- N = boxes.shape[0]
- if not N:
- print("\n*** No instances to display *** \n")
- else:
- assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
-
- # if not ax:
- # _, ax = plt.subplots(1, figsize=figsize)
-
- # Generate random colors
- colors = random_colors(N)
-
- # Show area outside image boundaries.
- height, width = image.shape[:2]
-# ax.set_ylim(height + 10, -10)
-# ax.set_xlim(-10, width + 10)
-# ax.axis('off')
-# ax.set_title(title)
-
- masked_image = image.astype(np.uint32).copy()
- for i in range(N):
- color = colors[i]
-
- # Bounding box
- if not np.any(boxes[i]):
- # Skip this instance. Has no bbox. Likely lost in image cropping.
- continue
- y1, x1, y2, x2 = boxes[i]
- p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
- alpha=0.7, linestyle="dashed",
- edgecolor=color, facecolor='none')
- #ax.add_patch(p)
-
- # Label
- class_id = class_ids[i]
- score = scores[i] if scores is not None else None
- label = class_names[class_id]
- x = random.randint(x1, (x1 + x2) // 2)
- caption = "{} {:.3f}".format(label, score) if score else label
-# ax.text(x1, y1 + 8, caption,
-# color='w', size=11, backgroundcolor="none")
-
- # Mask
- mask = masks[:, :, i]
- masked_image = apply_mask(masked_image, mask, color)
-
- # Mask Polygon
- # Pad to ensure proper polygons for masks that touch image edges.
- padded_mask = np.zeros(
- (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
- padded_mask[1:-1, 1:-1] = mask
- contours = find_contours(padded_mask, 0.5)
- for verts in contours:
- # Subtract the padding and flip (y, x) to (x, y)
- verts = np.fliplr(verts) - 1
- p = Polygon(verts, facecolor="none", edgecolor=color)
- #ax.add_patch(p)
- #ax.imshow(masked_image.astype(np.uint8))
- #plt.show()
- return masked_image.astype(np.uint8)
-
-
-def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):
- """
- anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.
- proposals: [n, 4] the same anchors but refined to fit objects better.
- """
- masked_image = image.copy()
-
- # Pick random anchors in case there are too many.
- ids = np.arange(rois.shape[0], dtype=np.int32)
- ids = np.random.choice(
- ids, limit, replace=False) if ids.shape[0] > limit else ids
-
- fig, ax = plt.subplots(1, figsize=(12, 12))
- if rois.shape[0] > limit:
- plt.title("Showing {} random ROIs out of {}".format(
- len(ids), rois.shape[0]))
- else:
- plt.title("{} ROIs".format(len(ids)))
-
- # Show area outside image boundaries.
- ax.set_ylim(image.shape[0] + 20, -20)
- ax.set_xlim(-50, image.shape[1] + 20)
- ax.axis('off')
-
- for i, id in enumerate(ids):
- color = np.random.rand(3)
- class_id = class_ids[id]
- # ROI
- y1, x1, y2, x2 = rois[id]
- p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
- edgecolor=color if class_id else "gray",
- facecolor='none', linestyle="dashed")
- ax.add_patch(p)
- # Refined ROI
- if class_id:
- ry1, rx1, ry2, rx2 = refined_rois[id]
- p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
- edgecolor=color, facecolor='none')
- ax.add_patch(p)
- # Connect the top-left corners of the anchor and proposal for easy visualization
- ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
-
- # Label
- label = class_names[class_id]
- ax.text(rx1, ry1 + 8, "{}".format(label),
- color='w', size=11, backgroundcolor="none")
-
- # Mask
- m = utils.unmold_mask(mask[id], rois[id]
- [:4].astype(np.int32), image.shape)
- masked_image = apply_mask(masked_image, m, color)
-
- #ax.imshow(masked_image)
-
- # Print stats
- print("Positive ROIs: ", class_ids[class_ids > 0].shape[0])
- print("Negative ROIs: ", class_ids[class_ids == 0].shape[0])
- print("Positive Ratio: {:.2f}".format(
- class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))
-
-
-# TODO: Replace with matplotlib equivalent?
-def draw_box(image, box, color):
- """Draw 3-pixel width bounding boxes on the given image array.
- color: list of 3 int values for RGB.
- """
- y1, x1, y2, x2 = box
- image[y1:y1 + 2, x1:x2] = color
- image[y2:y2 + 2, x1:x2] = color
- image[y1:y2, x1:x1 + 2] = color
- image[y1:y2, x2:x2 + 2] = color
- return image
-
-
-def display_top_masks(image, mask, class_ids, class_names, limit=4):
- """Display the given image and the top few class masks."""
- to_display = []
- titles = []
- to_display.append(image)
- titles.append("H x W={}x{}".format(image.shape[0], image.shape[1]))
- # Pick top prominent classes in this image
- unique_class_ids = np.unique(class_ids)
- mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]])
- for i in unique_class_ids]
- top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),
- key=lambda r: r[1], reverse=True) if v[1] > 0]
- # Generate images and titles
- for i in range(limit):
- class_id = top_ids[i] if i < len(top_ids) else -1
- # Pull masks of instances belonging to the same class.
- m = mask[:, :, np.where(class_ids == class_id)[0]]
- m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)
- to_display.append(m)
- titles.append(class_names[class_id] if class_id != -1 else "-")
- display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r")
-
-
-def plot_precision_recall(AP, precisions, recalls):
- """Draw the precision-recall curve.
-
- AP: Average precision at IoU >= 0.5
- precisions: list of precision values
- recalls: list of recall values
- """
- # Plot the Precision-Recall curve
- _, ax = plt.subplots(1)
- ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP))
- ax.set_ylim(0, 1.1)
- ax.set_xlim(0, 1.1)
- _ = ax.plot(recalls, precisions)
-
-
-def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores,
- overlaps, class_names, threshold=0.5):
- """Draw a grid showing how ground truth objects are classified.
- gt_class_ids: [N] int. Ground truth class IDs
- pred_class_ids: [N] int. Predicted class IDs
- pred_scores: [N] float. The probability scores of predicted classes
- overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.
- class_names: list of all class names in the dataset
- threshold: Float. The prediction probability required to predict a class
- """
- gt_class_ids = gt_class_ids[gt_class_ids != 0]
- pred_class_ids = pred_class_ids[pred_class_ids != 0]
-
- plt.figure(figsize=(12, 10))
- plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)
- plt.yticks(np.arange(len(pred_class_ids)),
- ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i])
- for i, id in enumerate(pred_class_ids)])
- plt.xticks(np.arange(len(gt_class_ids)),
- [class_names[int(id)] for id in gt_class_ids], rotation=90)
-
- thresh = overlaps.max() / 2.
- for i, j in itertools.product(range(overlaps.shape[0]),
- range(overlaps.shape[1])):
- text = ""
- if overlaps[i, j] > threshold:
- text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong"
- color = ("white" if overlaps[i, j] > thresh
- else "black" if overlaps[i, j] > 0
- else "grey")
- plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text),
- horizontalalignment="center", verticalalignment="center",
- fontsize=9, color=color)
-
- plt.tight_layout()
- plt.xlabel("Ground Truth")
- plt.ylabel("Predictions")
-
-
-def draw_boxes(image, boxes=None, refined_boxes=None,
- masks=None, captions=None, visibilities=None,
- title="", ax=None):
- """Draw bounding boxes and segmentation masks with differnt
- customizations.
-
- boxes: [N, (y1, x1, y2, x2)] in image coordinates.
- refined_boxes: Like boxes, but draw with solid lines to show
- that they're the result of refining 'boxes'.
- masks: [N, height, width]
- captions: List of N titles to display on each box
- visibilities: (optional) List of values of 0, 1, or 2. Determine how
- prominent each bounding box should be.
- title: An optional title to show over the image
- ax: (optional) Matplotlib axis to draw on.
- """
- # Number of boxes
- assert boxes is not None or refined_boxes is not None
- N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
-
- # Matplotlib Axis
- if not ax:
- _, ax = plt.subplots(1, figsize=(12, 12))
-
- # Generate random colors
- colors = random_colors(N)
-
- # Show area outside image boundaries.
- margin = image.shape[0] // 10
- ax.set_ylim(image.shape[0] + margin, -margin)
- ax.set_xlim(-margin, image.shape[1] + margin)
- ax.axis('off')
-
- ax.set_title(title)
-
- masked_image = image.astype(np.uint32).copy()
- for i in range(N):
- # Box visibility
- visibility = visibilities[i] if visibilities is not None else 1
- if visibility == 0:
- color = "gray"
- style = "dotted"
- alpha = 0.5
- elif visibility == 1:
- color = colors[i]
- style = "dotted"
- alpha = 1
- elif visibility == 2:
- color = colors[i]
- style = "solid"
- alpha = 1
-
- # Boxes
- if boxes is not None:
- if not np.any(boxes[i]):
- # Skip this instance. Has no bbox. Likely lost in cropping.
- continue
- y1, x1, y2, x2 = boxes[i]
- p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
- alpha=alpha, linestyle=style,
- edgecolor=color, facecolor='none')
- ax.add_patch(p)
-
- # Refined boxes
- if refined_boxes is not None and visibility > 0:
- ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
- p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
- edgecolor=color, facecolor='none')
- ax.add_patch(p)
- # Connect the top-left corners of the anchor and proposal
- if boxes is not None:
- ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
-
- # Captions
- if captions is not None:
- caption = captions[i]
- # If there are refined boxes, display captions on them
- if refined_boxes is not None:
- y1, x1, y2, x2 = ry1, rx1, ry2, rx2
- x = random.randint(x1, (x1 + x2) // 2)
- ax.text(x1, y1, caption, size=11, verticalalignment='top',
- color='w', backgroundcolor="none",
- bbox={'facecolor': color, 'alpha': 0.5,
- 'pad': 2, 'edgecolor': 'none'})
-
- # Masks
- if masks is not None:
- mask = masks[:, :, i]
- masked_image = apply_mask(masked_image, mask, color)
- # Mask Polygon
- # Pad to ensure proper polygons for masks that touch image edges.
- padded_mask = np.zeros(
- (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
- padded_mask[1:-1, 1:-1] = mask
- contours = find_contours(padded_mask, 0.5)
- for verts in contours:
- # Subtract the padding and flip (y, x) to (x, y)
- verts = np.fliplr(verts) - 1
- p = Polygon(verts, facecolor="none", edgecolor=color)
- ax.add_patch(p)
- ax.imshow(masked_image.astype(np.uint8))
-
-
-def display_table(table):
- """Display values in a table format.
- table: an iterable of rows, and each row is an iterable of values.
- """
- html = ""
- for row in table:
- row_html = ""
- for col in row:
- row_html += "
{:40}
".format(str(col))
- html += "
" + row_html + "
"
- html = "
" + html + "
"
- #IPython.display.display(IPython.display.HTML(html))
-
-
-def display_weight_stats(model):
- """Scans all the weights in the model and returns a list of tuples
- that contain stats about each weight.
- """
- layers = model.get_trainable_layers()
- table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]]
- for l in layers:
- weight_values = l.get_weights() # list of Numpy arrays
- weight_tensors = l.weights # list of TF tensors
- for i, w in enumerate(weight_values):
- weight_name = weight_tensors[i].name
- # Detect problematic layers. Exclude biases of conv layers.
- alert = ""
- if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1):
- alert += "*** dead?"
- if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:
- alert += "*** Overflow?"
- # Add row
- table.append([
- weight_name + alert,
- str(w.shape),
- "{:+9.4f}".format(w.min()),
- "{:+10.4f}".format(w.max()),
- "{:+9.4f}".format(w.std()),
- ])
- #display_table(table)
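Since plotting is commented out in this Space's copy, `display_instances` effectively returns the rendered array. A hypothetical smoke test (the class names and detection below are made up, and the Space's `mrcnn` package and its dependencies are assumed importable):

```python
# Hypothetical smoke test for display_instances with one fake detection.
import numpy as np
from mrcnn.visualize import display_instances

image = np.zeros((128, 128, 3), dtype=np.uint8)
boxes = np.array([[20, 20, 100, 100]])               # (y1, x1, y2, x2)
masks = np.zeros((128, 128, 1), dtype=np.uint8)
masks[30:90, 30:90, 0] = 1

rendered = display_instances(
    image, boxes, masks,
    class_ids=np.array([1]),
    class_names=["BG", "object"],                     # made-up class list
    scores=np.array([0.9]),
)
print(rendered.shape, rendered.dtype)                 # (128, 128, 3) uint8
```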
diff --git a/spaces/Goya11/zimu/README.md b/spaces/Goya11/zimu/README.md
deleted file mode 100644
index 5cc3da0bc0fb1033ffef1178324d5440a3373d09..0000000000000000000000000000000000000000
--- a/spaces/Goya11/zimu/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Zimu
-emoji: 🏆
-colorFrom: yellow
-colorTo: green
-sdk: gradio
-sdk_version: 3.28.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/grid_sample_gradfix.py b/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/grid_sample_gradfix.py
deleted file mode 100644
index ca6b3413ea72a734703c34382c023b84523601fd..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/grid_sample_gradfix.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Custom replacement for `torch.nn.functional.grid_sample` that
-supports arbitrarily high order gradients between the input and output.
-Only works on 2D images and assumes
-`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
-
-import warnings
-import torch
-
-# pylint: disable=redefined-builtin
-# pylint: disable=arguments-differ
-# pylint: disable=protected-access
-
-#----------------------------------------------------------------------------
-
-enabled = False # Enable the custom op by setting this to true.
-
-#----------------------------------------------------------------------------
-
-def grid_sample(input, grid):
- if _should_use_custom_op():
- return _GridSample2dForward.apply(input, grid)
- return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
-
-#----------------------------------------------------------------------------
-
-def _should_use_custom_op():
- if not enabled:
- return False
- if any(torch.__version__.startswith(x) for x in ['1.7.', '1.8.', '1.9']):
- return True
- warnings.warn(f'grid_sample_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.grid_sample().')
- return False
-
-#----------------------------------------------------------------------------
-
-class _GridSample2dForward(torch.autograd.Function):
- @staticmethod
- def forward(ctx, input, grid):
- assert input.ndim == 4
- assert grid.ndim == 4
- output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
- ctx.save_for_backward(input, grid)
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, grid = ctx.saved_tensors
- grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
- return grad_input, grad_grid
-
-#----------------------------------------------------------------------------
-
-class _GridSample2dBackward(torch.autograd.Function):
- @staticmethod
- def forward(ctx, grad_output, input, grid):
- op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
- grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
- ctx.save_for_backward(grid)
- return grad_input, grad_grid
-
- @staticmethod
- def backward(ctx, grad2_grad_input, grad2_grad_grid):
- _ = grad2_grad_grid # unused
- grid, = ctx.saved_tensors
- grad2_grad_output = None
- grad2_input = None
- grad2_grid = None
-
- if ctx.needs_input_grad[0]:
- grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
-
- assert not ctx.needs_input_grad[2]
- return grad2_grad_output, grad2_input, grad2_grid
-
-#----------------------------------------------------------------------------
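A small usage sketch of the wrapper above (import path assumed from this file's location; on PyTorch versions outside the 1.7–1.9 whitelist it simply warns and falls back to the native op):

```python
# Drop-in use of the grid_sample wrapper; falls back to F.grid_sample when the
# custom op is disabled or the torch version is not whitelisted.
import torch
from torch_utils.ops import grid_sample_gradfix  # path assumed from this repo layout

grid_sample_gradfix.enabled = True

x = torch.randn(1, 3, 8, 8)
grid = torch.rand(1, 8, 8, 2) * 2 - 1            # normalized sampling coords in [-1, 1]
y = grid_sample_gradfix.grid_sample(x, grid)
print(y.shape)                                   # torch.Size([1, 3, 8, 8])
```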
diff --git a/spaces/Gradio-Blocks/pokemon-move-generator-app/README.md b/spaces/Gradio-Blocks/pokemon-move-generator-app/README.md
deleted file mode 100644
index a16cbf994f996fe17dc7a81878744648348e88d6..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/pokemon-move-generator-app/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Pokémon Move Generator
-emoji: 🎮
-colorFrom: red
-colorTo: grey
-sdk: gradio
-sdk_version: 3.0.2
-app_file: app.py
-pinned: true
----
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py
deleted file mode 100644
index 8e8b830fd544b73d2da7a359ea208178a37fc324..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://detectron2/resnet101_caffe',
- backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py
deleted file mode 100644
index 2816b16f64dbcbfecd779650aaae0ca6cee0d810..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# TODO: Remove this config after benchmarking all related configs
-_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'
-
-data = dict(samples_per_gpu=4, workers_per_gpu=4)
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
deleted file mode 100644
index a5f6bd2292f4c1dfbd59de968e0dc3acf7579424..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
+++ /dev/null
@@ -1,3 +0,0 @@
-_base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/ort_test.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/ort_test.py
deleted file mode 100644
index 807b21272a04c86176c19de45fb2407b71e33319..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/ort_test.py
+++ /dev/null
@@ -1,191 +0,0 @@
-import argparse
-import os
-import os.path as osp
-import warnings
-
-import mmcv
-import numpy as np
-import onnxruntime as ort
-import torch
-from mmcv.parallel import MMDataParallel
-from mmcv.runner import get_dist_info
-from mmcv.utils import DictAction
-
-from mmseg.apis import single_gpu_test
-from mmseg.datasets import build_dataloader, build_dataset
-from mmseg.models.segmentors.base import BaseSegmentor
-
-
-class ONNXRuntimeSegmentor(BaseSegmentor):
-
- def __init__(self, onnx_file, cfg, device_id):
- super(ONNXRuntimeSegmentor, self).__init__()
- # get the custom op path
- ort_custom_op_path = ''
- try:
- from mmcv.ops import get_onnxruntime_op_path
- ort_custom_op_path = get_onnxruntime_op_path()
- except (ImportError, ModuleNotFoundError):
- warnings.warn('If input model has custom op from mmcv, \
- you may have to build mmcv with ONNXRuntime from source.')
- session_options = ort.SessionOptions()
- # register custom op for onnxruntime
- if osp.exists(ort_custom_op_path):
- session_options.register_custom_ops_library(ort_custom_op_path)
- sess = ort.InferenceSession(onnx_file, session_options)
- providers = ['CPUExecutionProvider']
- options = [{}]
- is_cuda_available = ort.get_device() == 'GPU'
- if is_cuda_available:
- providers.insert(0, 'CUDAExecutionProvider')
- options.insert(0, {'device_id': device_id})
-
- sess.set_providers(providers, options)
-
- self.sess = sess
- self.device_id = device_id
- self.io_binding = sess.io_binding()
- self.output_names = [_.name for _ in sess.get_outputs()]
- for name in self.output_names:
- self.io_binding.bind_output(name)
- self.cfg = cfg
- self.test_mode = cfg.model.test_cfg.mode
-
- def extract_feat(self, imgs):
- raise NotImplementedError('This method is not implemented.')
-
- def encode_decode(self, img, img_metas):
- raise NotImplementedError('This method is not implemented.')
-
- def forward_train(self, imgs, img_metas, **kwargs):
- raise NotImplementedError('This method is not implemented.')
-
- def simple_test(self, img, img_meta, **kwargs):
- device_type = img.device.type
- self.io_binding.bind_input(
- name='input',
- device_type=device_type,
- device_id=self.device_id,
- element_type=np.float32,
- shape=img.shape,
- buffer_ptr=img.data_ptr())
- self.sess.run_with_iobinding(self.io_binding)
- seg_pred = self.io_binding.copy_outputs_to_cpu()[0]
- # whole might support dynamic reshape
- ori_shape = img_meta[0]['ori_shape']
- if not (ori_shape[0] == seg_pred.shape[-2]
- and ori_shape[1] == seg_pred.shape[-1]):
- seg_pred = torch.from_numpy(seg_pred).float()
- seg_pred = torch.nn.functional.interpolate(
- seg_pred, size=tuple(ori_shape[:2]), mode='nearest')
- seg_pred = seg_pred.long().detach().cpu().numpy()
- seg_pred = seg_pred[0]
- seg_pred = list(seg_pred)
- return seg_pred
-
- def aug_test(self, imgs, img_metas, **kwargs):
- raise NotImplementedError('This method is not implemented.')
-
-
-def parse_args():
- parser = argparse.ArgumentParser(
- description='mmseg onnxruntime backend test (and eval) a model')
- parser.add_argument('config', help='test config file path')
- parser.add_argument('model', help='Input model file')
- parser.add_argument('--out', help='output result file in pickle format')
- parser.add_argument(
- '--format-only',
- action='store_true',
- help='Format the output results without performing evaluation. It is '
- 'useful when you want to format the result to a specific format and '
- 'submit it to the test server')
- parser.add_argument(
- '--eval',
- type=str,
- nargs='+',
- help='evaluation metrics, which depends on the dataset, e.g., "mIoU"'
- ' for generic datasets, and "cityscapes" for Cityscapes')
- parser.add_argument('--show', action='store_true', help='show results')
- parser.add_argument(
- '--show-dir', help='directory where painted images will be saved')
- parser.add_argument(
- '--options', nargs='+', action=DictAction, help='custom options')
- parser.add_argument(
- '--eval-options',
- nargs='+',
- action=DictAction,
- help='custom options for evaluation')
- parser.add_argument(
- '--opacity',
- type=float,
- default=0.5,
- help='Opacity of painted segmentation map. In (0, 1] range.')
- parser.add_argument('--local_rank', type=int, default=0)
- args = parser.parse_args()
- if 'LOCAL_RANK' not in os.environ:
- os.environ['LOCAL_RANK'] = str(args.local_rank)
- return args
-
-
-def main():
- args = parse_args()
-
- assert args.out or args.eval or args.format_only or args.show \
- or args.show_dir, \
- ('Please specify at least one operation (save/eval/format/show the '
- 'results / save the results) with the argument "--out", "--eval"'
- ', "--format-only", "--show" or "--show-dir"')
-
- if args.eval and args.format_only:
- raise ValueError('--eval and --format_only cannot be both specified')
-
- if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
- raise ValueError('The output file must be a pkl file.')
-
- cfg = mmcv.Config.fromfile(args.config)
- if args.options is not None:
- cfg.merge_from_dict(args.options)
- cfg.model.pretrained = None
- cfg.data.test.test_mode = True
-
- # init distributed env first, since logger depends on the dist info.
- distributed = False
-
- # build the dataloader
- # TODO: support multiple images per gpu (only minor changes are needed)
- dataset = build_dataset(cfg.data.test)
- data_loader = build_dataloader(
- dataset,
- samples_per_gpu=1,
- workers_per_gpu=cfg.data.workers_per_gpu,
- dist=distributed,
- shuffle=False)
-
- # load onnx config and meta
- cfg.model.train_cfg = None
- model = ONNXRuntimeSegmentor(args.model, cfg=cfg, device_id=0)
- model.CLASSES = dataset.CLASSES
- model.PALETTE = dataset.PALETTE
-
- efficient_test = False
- if args.eval_options is not None:
- efficient_test = args.eval_options.get('efficient_test', False)
-
- model = MMDataParallel(model, device_ids=[0])
- outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
- efficient_test, args.opacity)
-
- rank, _ = get_dist_info()
- if rank == 0:
- if args.out:
- print(f'\nwriting results to {args.out}')
- mmcv.dump(outputs, args.out)
- kwargs = {} if args.eval_options is None else args.eval_options
- if args.format_only:
- dataset.format_results(outputs, **kwargs)
- if args.eval:
- dataset.evaluate(outputs, args.eval, **kwargs)
-
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/polynomial_decay_lr_scheduler.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/polynomial_decay_lr_scheduler.py
deleted file mode 100644
index c5ea30b094538269dbb0055ab3163f84d1cf6e90..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/optim/polynomial_decay_lr_scheduler.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch.optim import Optimizer
-from torch.optim.lr_scheduler import _LRScheduler
-
-
-class PolynomialDecayLRScheduler(_LRScheduler):
- """Polynomial decay LR scheduler.
-
- Args:
- optimizer (Optimizer): Torch optimizer.
- warmup_steps (int): Number of warmup steps.
- total_steps (int): Total number of steps.
- end_lr (float): Final learning rate to achieve over total number of steps.
- zero_lr_warmup_steps (int): Number of steps with a learning rate of value 0.
- power (float): Decay exponent.
- """
- def __init__(self, optimizer: Optimizer, warmup_steps: int, total_steps: int,
- end_lr: float = 0., zero_lr_warmup_steps: int = 0, power: float = 1.):
- self.warmup_steps = warmup_steps
- self.total_steps = total_steps
- self.end_lr = end_lr
- self.zero_lr_warmup_steps = zero_lr_warmup_steps
- self.power = power
- super().__init__(optimizer)
-
- def _get_sched_lr(self, lr: float, step: int):
- if self.zero_lr_warmup_steps > 0 and step <= self.zero_lr_warmup_steps:
- lr = 0
- elif self.warmup_steps > 0 and step <= self.warmup_steps + self.zero_lr_warmup_steps:
- lr_ratio = (step - self.zero_lr_warmup_steps) / float(self.warmup_steps)
- lr = lr_ratio * lr
- elif step >= self.total_steps:
- lr = self.end_lr
- else:
- total_warmup_steps = self.warmup_steps + self.zero_lr_warmup_steps
- lr_range = lr - self.end_lr
- pct_remaining = 1 - (step - total_warmup_steps) / (self.total_steps - total_warmup_steps)
- lr = lr_range * pct_remaining ** self.power + self.end_lr
- return lr
-
- def get_lr(self):
- return [self._get_sched_lr(base_lr, self.last_epoch) for base_lr in self.base_lrs]
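# A small usage sketch with illustrative step counts and optimizer: the learning rate stays at zero
# for zero_lr_warmup_steps, ramps linearly over warmup_steps, then decays polynomially toward end_lr.
import torch

model = torch.nn.Linear(4, 4)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
sched = PolynomialDecayLRScheduler(opt, warmup_steps=100, total_steps=1000,
                                   end_lr=1e-5, zero_lr_warmup_steps=10, power=1.0)
for step in range(1000):
    opt.step()
    sched.step()
    if step in (0, 10, 110, 500, 999):
        print(step, sched.get_last_lr()[0])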
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/scripts/__init__.py b/spaces/GrandaddyShmax/AudioCraft_Plus/scripts/__init__.py
deleted file mode 100644
index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/scripts/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/spaces/GrandaddyShmax/MusicGen_Plus/audiocraft/quantization/core_vq.py b/spaces/GrandaddyShmax/MusicGen_Plus/audiocraft/quantization/core_vq.py
deleted file mode 100644
index e1896bb1788a945a1f7be6369abb255ecf72c7a0..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/MusicGen_Plus/audiocraft/quantization/core_vq.py
+++ /dev/null
@@ -1,400 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import typing as tp
-
-from einops import rearrange, repeat
-import flashy
-import torch
-from torch import nn, einsum
-import torch.nn.functional as F
-
-
-def exists(val: tp.Optional[tp.Any]) -> bool:
- return val is not None
-
-
-def default(val: tp.Any, d: tp.Any) -> tp.Any:
- return val if exists(val) else d
-
-
-def l2norm(t):
- return F.normalize(t, p=2, dim=-1)
-
-
-def ema_inplace(moving_avg, new, decay: float):
- moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay))
-
-
-def laplace_smoothing(x, n_categories: int, epsilon: float = 1e-5):
- return (x + epsilon) / (x.sum() + n_categories * epsilon)
-
-
-def uniform_init(*shape: int):
- t = torch.empty(shape)
- nn.init.kaiming_uniform_(t)
- return t
-
-
-def sample_vectors(samples, num: int):
- num_samples, device = samples.shape[0], samples.device
-
- if num_samples >= num:
- indices = torch.randperm(num_samples, device=device)[:num]
- else:
- indices = torch.randint(0, num_samples, (num,), device=device)
-
- return samples[indices]
-
-
-def kmeans(samples, num_clusters: int, num_iters: int = 10):
- dim, dtype = samples.shape[-1], samples.dtype
-
- means = sample_vectors(samples, num_clusters)
-
- for _ in range(num_iters):
- diffs = rearrange(samples, "n d -> n () d") - rearrange(
- means, "c d -> () c d"
- )
- dists = -(diffs ** 2).sum(dim=-1)
-
- buckets = dists.max(dim=-1).indices
- bins = torch.bincount(buckets, minlength=num_clusters)
- zero_mask = bins == 0
- bins_min_clamped = bins.masked_fill(zero_mask, 1)
-
- new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype)
- new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples)
- new_means = new_means / bins_min_clamped[..., None]
-
- means = torch.where(zero_mask[..., None], means, new_means)
-
- return means, bins
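# A quick shape check of the k-means helper (run with this module's imports in scope; values are random):
x = torch.randn(1024, 32)
centroids, counts = kmeans(x, num_clusters=16, num_iters=5)
print(centroids.shape, counts.shape)   # torch.Size([16, 32]) torch.Size([16])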
-
-
-def orthgonal_loss_fn(t):
- # eq (2) from https://arxiv.org/abs/2112.00384
- n = t.shape[0]
- normed_codes = l2norm(t)
- identity = torch.eye(n, device=t.device)
- cosine_sim = einsum("i d, j d -> i j", normed_codes, normed_codes)
- return ((cosine_sim - identity) ** 2).sum() / (n ** 2)
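# Worked check of eq (2): for a perfectly orthogonal set of code vectors the loss is zero.
codes = torch.eye(4)                 # four orthonormal code vectors
print(orthgonal_loss_fn(codes))      # tensor(0.): all off-diagonal cosine similarities vanish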
-
-
-class EuclideanCodebook(nn.Module):
- """Codebook with Euclidean distance.
-
- Args:
- dim (int): Dimension.
- codebook_size (int): Codebook size.
- kmeans_init (bool): Whether to use k-means to initialize the codebooks.
- If set to true, run the k-means algorithm on the first training batch and use
- the learned centroids as initialization.
- kmeans_iters (int): Number of iterations used for k-means algorithm at initialization.
- decay (float): Decay for exponential moving average over the codebooks.
- epsilon (float): Epsilon value for numerical stability.
- threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
- that have an exponential moving average cluster size less than the specified threshold with
- randomly selected vector from the current batch.
- """
- def __init__(
- self,
- dim: int,
- codebook_size: int,
-        kmeans_init: bool = False,
- kmeans_iters: int = 10,
- decay: float = 0.8,
- epsilon: float = 1e-5,
- threshold_ema_dead_code: int = 2,
- ):
- super().__init__()
- self.decay = decay
- init_fn: tp.Union[tp.Callable[..., torch.Tensor], tp.Any] = uniform_init if not kmeans_init else torch.zeros
- embed = init_fn(codebook_size, dim)
-
- self.codebook_size = codebook_size
-
- self.kmeans_iters = kmeans_iters
- self.epsilon = epsilon
- self.threshold_ema_dead_code = threshold_ema_dead_code
-
- self.register_buffer("inited", torch.Tensor([not kmeans_init]))
- self.register_buffer("cluster_size", torch.zeros(codebook_size))
- self.register_buffer("embed", embed)
- self.register_buffer("embed_avg", embed.clone())
-
- @torch.jit.ignore
- def init_embed_(self, data):
- if self.inited:
- return
-
- embed, cluster_size = kmeans(data, self.codebook_size, self.kmeans_iters)
- self.embed.data.copy_(embed)
- self.embed_avg.data.copy_(embed.clone())
- self.cluster_size.data.copy_(cluster_size)
- self.inited.data.copy_(torch.Tensor([True]))
- # Make sure all buffers across workers are in sync after initialization
- flashy.distrib.broadcast_tensors(self.buffers())
-
- def replace_(self, samples, mask):
- modified_codebook = torch.where(
- mask[..., None], sample_vectors(samples, self.codebook_size), self.embed
- )
- self.embed.data.copy_(modified_codebook)
-
- def expire_codes_(self, batch_samples):
- if self.threshold_ema_dead_code == 0:
- return
-
- expired_codes = self.cluster_size < self.threshold_ema_dead_code
- if not torch.any(expired_codes):
- return
-
- batch_samples = rearrange(batch_samples, "... d -> (...) d")
- self.replace_(batch_samples, mask=expired_codes)
- flashy.distrib.broadcast_tensors(self.buffers())
-
- def preprocess(self, x):
- x = rearrange(x, "... d -> (...) d")
- return x
-
- def quantize(self, x):
- embed = self.embed.t()
- dist = -(
- x.pow(2).sum(1, keepdim=True)
- - 2 * x @ embed
- + embed.pow(2).sum(0, keepdim=True)
- )
- embed_ind = dist.max(dim=-1).indices
- return embed_ind
-
- def postprocess_emb(self, embed_ind, shape):
- return embed_ind.view(*shape[:-1])
-
- def dequantize(self, embed_ind):
- quantize = F.embedding(embed_ind, self.embed)
- return quantize
-
- def encode(self, x):
- shape = x.shape
- # pre-process
- x = self.preprocess(x)
- # quantize
- embed_ind = self.quantize(x)
- # post-process
- embed_ind = self.postprocess_emb(embed_ind, shape)
- return embed_ind
-
- def decode(self, embed_ind):
- quantize = self.dequantize(embed_ind)
- return quantize
-
- def forward(self, x):
- shape, dtype = x.shape, x.dtype
- x = self.preprocess(x)
- self.init_embed_(x)
-
- embed_ind = self.quantize(x)
- embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype)
- embed_ind = self.postprocess_emb(embed_ind, shape)
- quantize = self.dequantize(embed_ind)
-
- if self.training:
- # We do the expiry of code at that point as buffers are in sync
- # and all the workers will take the same decision.
- self.expire_codes_(x)
- ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay)
- embed_sum = x.t() @ embed_onehot
- ema_inplace(self.embed_avg, embed_sum.t(), self.decay)
- cluster_size = (
- laplace_smoothing(self.cluster_size, self.codebook_size, self.epsilon)
- * self.cluster_size.sum()
- )
- embed_normalized = self.embed_avg / cluster_size.unsqueeze(1)
- self.embed.data.copy_(embed_normalized)
-
- return quantize, embed_ind
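# An encode/decode round trip with the codebook above, to make the shapes concrete
# (random inputs, default hyperparameters):
cb = EuclideanCodebook(dim=8, codebook_size=32)
x = torch.randn(4, 10, 8)      # (batch, time, dim): the codebook operates on the last dimension
codes = cb.encode(x)           # (4, 10) integer indices into the 32-entry codebook
recon = cb.decode(codes)       # (4, 10, 8) nearest codewords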
-
-
-class VectorQuantization(nn.Module):
- """Vector quantization implementation.
- Currently supports only euclidean distance.
-
- Args:
- dim (int): Dimension
- codebook_size (int): Codebook size
- codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim.
- decay (float): Decay for exponential moving average over the codebooks.
- epsilon (float): Epsilon value for numerical stability.
- kmeans_init (bool): Whether to use kmeans to initialize the codebooks.
- kmeans_iters (int): Number of iterations used for kmeans initialization.
-        threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes
-            that have an exponential moving average cluster size less than the specified threshold with
-            randomly selected vector from the current batch.
- channels_last (bool): Channels are the last dimension in the input tensors.
- commitment_weight (float): Weight for commitment loss.
- orthogonal_reg_weight (float): Orthogonal regularization weights.
- orthogonal_reg_active_codes_only (bool): Apply orthogonal regularization only on active codes.
- orthogonal_reg_max_codes (optional int): Maximum number of codes to consider
-            for orthogonal regularization.
- """
- def __init__(
- self,
- dim: int,
- codebook_size: int,
- codebook_dim: tp.Optional[int] = None,
- decay: float = 0.8,
- epsilon: float = 1e-5,
- kmeans_init: bool = False,
- kmeans_iters: int = 10,
- threshold_ema_dead_code: int = 2,
- channels_last: bool = False,
- commitment_weight: float = 1.,
- orthogonal_reg_weight: float = 0.0,
- orthogonal_reg_active_codes_only: bool = False,
- orthogonal_reg_max_codes: tp.Optional[int] = None,
- ):
- super().__init__()
- _codebook_dim: int = default(codebook_dim, dim)
-
- requires_projection = _codebook_dim != dim
- self.project_in = (nn.Linear(dim, _codebook_dim) if requires_projection else nn.Identity())
- self.project_out = (nn.Linear(_codebook_dim, dim) if requires_projection else nn.Identity())
-
- self.epsilon = epsilon
- self.commitment_weight = commitment_weight
-
- self.orthogonal_reg_weight = orthogonal_reg_weight
- self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
- self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
-
- self._codebook = EuclideanCodebook(dim=_codebook_dim, codebook_size=codebook_size,
- kmeans_init=kmeans_init, kmeans_iters=kmeans_iters,
- decay=decay, epsilon=epsilon,
- threshold_ema_dead_code=threshold_ema_dead_code)
- self.codebook_size = codebook_size
-
- self.channels_last = channels_last
-
- @property
- def codebook(self):
- return self._codebook.embed
-
- @property
- def inited(self):
- return self._codebook.inited
-
- def _preprocess(self, x):
- if not self.channels_last:
- x = rearrange(x, "b d n -> b n d")
- return x
-
- def _postprocess(self, quantize):
- if not self.channels_last:
- quantize = rearrange(quantize, "b n d -> b d n")
- return quantize
-
- def encode(self, x):
- x = self._preprocess(x)
- x = self.project_in(x)
- embed_in = self._codebook.encode(x)
- return embed_in
-
- def decode(self, embed_ind):
- quantize = self._codebook.decode(embed_ind)
- quantize = self.project_out(quantize)
- quantize = self._postprocess(quantize)
- return quantize
-
- def forward(self, x):
- device = x.device
- x = self._preprocess(x)
-
- x = self.project_in(x)
- quantize, embed_ind = self._codebook(x)
-
- if self.training:
- quantize = x + (quantize - x).detach()
-
- loss = torch.tensor([0.0], device=device, requires_grad=self.training)
-
- if self.training:
- if self.commitment_weight > 0:
- commit_loss = F.mse_loss(quantize.detach(), x)
- loss = loss + commit_loss * self.commitment_weight
-
- if self.orthogonal_reg_weight > 0:
- codebook = self.codebook
-
- if self.orthogonal_reg_active_codes_only:
- # only calculate orthogonal loss for the activated codes for this batch
- unique_code_ids = torch.unique(embed_ind)
- codebook = codebook[unique_code_ids]
-
- num_codes = codebook.shape[0]
- if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
- rand_ids = torch.randperm(num_codes, device=device)[:self.orthogonal_reg_max_codes]
- codebook = codebook[rand_ids]
-
- orthogonal_reg_loss = orthgonal_loss_fn(codebook)
- loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
-
- quantize = self.project_out(quantize)
- quantize = self._postprocess(quantize)
-
- return quantize, embed_ind, loss
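# A small shape check for the full quantizer in eval mode, so the EMA codebook update and code
# expiry are skipped and the loss stays at zero; channels_last defaults to False, so inputs are
# (batch, dim, time):
vq = VectorQuantization(dim=16, codebook_size=64).eval()
x = torch.randn(2, 16, 50)
q, codes, loss = vq(x)
print(q.shape, codes.shape, loss)   # (2, 16, 50), (2, 50), tensor([0.])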
-
-
-class ResidualVectorQuantization(nn.Module):
- """Residual vector quantization implementation.
-
- Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf
- """
- def __init__(self, *, num_quantizers, **kwargs):
- super().__init__()
- self.layers = nn.ModuleList(
- [VectorQuantization(**kwargs) for _ in range(num_quantizers)]
- )
-
- def forward(self, x, n_q: tp.Optional[int] = None):
- quantized_out = 0.0
- residual = x
-
- all_losses = []
- all_indices = []
-
- n_q = n_q or len(self.layers)
-
- for i, layer in enumerate(self.layers[:n_q]):
- quantized, indices, loss = layer(residual)
- residual = residual - quantized
- quantized_out = quantized_out + quantized
- all_indices.append(indices)
- all_losses.append(loss)
-
- out_losses, out_indices = map(torch.stack, (all_losses, all_indices))
- return quantized_out, out_indices, out_losses
-
- def encode(self, x: torch.Tensor, n_q: tp.Optional[int] = None) -> torch.Tensor:
- residual = x
- all_indices = []
- n_q = n_q or len(self.layers)
- for layer in self.layers[:n_q]:
- indices = layer.encode(residual)
- quantized = layer.decode(indices)
- residual = residual - quantized
- all_indices.append(indices)
- out_indices = torch.stack(all_indices)
- return out_indices
-
- def decode(self, q_indices: torch.Tensor) -> torch.Tensor:
- quantized_out = torch.tensor(0.0, device=q_indices.device)
- for i, indices in enumerate(q_indices):
- layer = self.layers[i]
- quantized = layer.decode(indices)
- quantized_out = quantized_out + quantized
- return quantized_out
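# And a residual round trip, where each of the n_q layers quantizes what the previous layers left over:
rvq = ResidualVectorQuantization(num_quantizers=4, dim=16, codebook_size=64)
x = torch.randn(2, 16, 50)
codes = rvq.encode(x)          # (4, 2, 50): one code map per quantizer
x_hat = rvq.decode(codes)      # (2, 16, 50): sum of the per-layer reconstructions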
diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/s_multimae/lr.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/s_multimae/lr.py
deleted file mode 100644
index 10787df6427b0914e95c88513b9e50bd3349765f..0000000000000000000000000000000000000000
--- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/s_multimae/lr.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from abc import ABCMeta, abstractmethod
-from typing import Dict
-
-class BaseLR(metaclass=ABCMeta):
-
- @abstractmethod
- def get_lr(self, cur_iter: int): pass
-
- def state_dict(self) -> Dict:
- """Returns the state of the scheduler as a :class:`dict`.
-
- It contains an entry for every variable in self.__dict__ which
- is not the optimizer.
- """
- return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
-
- def load_state_dict(self, state_dict: Dict) -> None:
- """Loads the schedulers state.
-
- Args:
- state_dict (dict): scheduler state. Should be an object returned
- from a call to :meth:`state_dict`.
- """
- self.__dict__.update(state_dict)
-
-
-class PolyLR(BaseLR):
- def __init__(self, start_lr: float, lr_power: float, total_iters: int):
- self.start_lr = start_lr
- self.lr_power = lr_power
- self.total_iters = total_iters + 0.0
-
- def get_lr(self, cur_iter: int) -> float:
- return self.start_lr * ((1 - float(cur_iter) / self.total_iters) ** self.lr_power)
-
-class LinearLR(BaseLR):
- def __init__(self, start_lr: float, end_lr: float, total_iters: int):
- """@Deprecated
- """
- self.start_lr = start_lr
- self.end_lr = end_lr
- self.total_iters = float(total_iters)
-
- self.b = self.start_lr
- self.a = (self.end_lr - self.start_lr) / self.total_iters
-
-
- def get_lr(self, cur_iter: int) -> float:
- return self.a * cur_iter + self.b
-
-class LinearLRRestart(BaseLR):
- def __init__(
- self,
- start_lr: float,
- end_lr: float,
- warmup_epoch: int,
- ):
-        '''Note: remember to call set_epoch at the beginning of each epoch'''
- self.start_lr = start_lr
- self.end_lr = end_lr
- self.warmup_epoch = warmup_epoch
-
- def set_epoch(self, epoch: int, total_iters_per_epoch: int) -> None:
- '''
- if epoch is between 1->100, upperbound will be 100
- if epoch is between 101->200, upperbound will be 200
- '''
- upperbound = (((epoch-1) // self.warmup_epoch) + 1) * self.warmup_epoch
- total_iters = upperbound * total_iters_per_epoch
-
- self.b = self.start_lr
- self.a = (self.end_lr - self.start_lr) / total_iters
-
- def get_lr(self, cur_iter: int) -> float:
- """Note: the beginning cur_iter is 0
- """
- return self.a * cur_iter + self.b
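# These schedulers only compute a value; the training loop is expected to write it into the
# optimizer's param groups itself. A minimal sketch of that pattern with PolyLR (the optimizer
# and iteration count are illustrative):
import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
sched = PolyLR(start_lr=1e-3, lr_power=0.9, total_iters=10_000)
for it in range(10_000):
    lr = sched.get_lr(it)
    for group in opt.param_groups:
        group['lr'] = lr
    opt.step()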
diff --git a/spaces/Hallucinate/demo/app.py b/spaces/Hallucinate/demo/app.py
deleted file mode 100644
index 1a5af519c45f001a682b056efcf0fb88e91120f3..0000000000000000000000000000000000000000
--- a/spaces/Hallucinate/demo/app.py
+++ /dev/null
@@ -1,905 +0,0 @@
-#————————————————————Credits——————————————————
-#borrowing heavily from deforum stable diffusion
-
-
-#Overview
-#5. Gradio Interface
-#1. Setup
-#2. Prompts
-#3. Video
-#4. Run
-
-
-#————————————————————1.1. Setup————————————————————————
-
-import subprocess, time, gc, os, sys
-
-def setup_environment():
- start_time = time.time()
- print_subprocess = False
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb: 256"
- #PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:512
- use_xformers_for_colab = True
- try:
- ipy = get_ipython()
- except:
- ipy = 'could not get_ipython'
- if 'google.colab' in str(ipy):
- print("..setting up environment")
-
- # weird hack
- #import torch
-
- all_process = [
- ['git', 'clone', 'https://github.com/deforum-art/deforum-stable-diffusion'],
- ['pip', 'install', 'omegaconf', 'einops==0.4.1', 'pytorch-lightning==1.7.7', 'torchmetrics', 'transformers', 'safetensors', 'kornia'],
- ['pip', 'install', 'accelerate', 'ftfy', 'jsonmerge', 'matplotlib', 'resize-right', 'timm', 'torchdiffeq','scikit-learn','torchsde','open-clip-torch','numpngw'],
- ]
- for process in all_process:
- running = subprocess.run(process,stdout=subprocess.PIPE).stdout.decode('utf-8')
- if print_subprocess:
- print(running)
- with open('deforum-stable-diffusion/src/k_diffusion/__init__.py', 'w') as f:
- f.write('')
- sys.path.extend([
- 'deforum-stable-diffusion/',
- 'deforum-stable-diffusion/src',
- ])
- if use_xformers_for_colab:
-
- print("..installing triton and xformers")
-
- all_process = [['pip', 'install', 'triton==2.0.0.dev20221202', 'xformers==0.0.16']]
- for process in all_process:
- running = subprocess.run(process,stdout=subprocess.PIPE).stdout.decode('utf-8')
- if print_subprocess:
- print(running)
- else:
- sys.path.extend([
- 'src'
- ])
- end_time = time.time()
- print(f"..environment set up in {end_time-start_time:.0f} seconds")
- return
-
-setup_environment()
-
-#————————————————————1.2. Imports————————————————————————
-
-import os
-import torch
-import random
-import clip
-import gradio as gr
-import re
-import random
-
-from base64 import b64encode
-
-#from k_diffusion.external import CompVisDenoiser, CompVisVDenoiser
-import py3d_tools as p3dT
-
-from IPython import display
-from types import SimpleNamespace
-from helpers.save_images import get_output_folder
-from helpers.settings import load_args
-from helpers.render import render_animation, render_input_video, render_image_batch, render_interpolation
-from helpers.model_load import make_linear_decode, load_model, get_model_output_paths
-from helpers.aesthetics import load_aesthetics_model
-from diffusers import DiffusionPipeline
-from transformers import (
- WhisperForConditionalGeneration,
- WhisperProcessor,
-)
-from share_btn import community_icon_html, loading_icon_html, share_js
-#from plms import PLMSSampler
-
-#from AdaBins-main import infer, model_io, utils
-#from AdaBins-main.models import unet_adaptive_bins.py
-
-import gradio as gr
-from datasets import load_dataset
-from PIL import Image
-
-import requests
-
-#————————————————————1.3. Token Setup———————————————
-
-MY_SECRET_TOKEN=os.environ.get('HF_TOKEN_SD')
-device = "cuda" if torch.cuda.is_available() else "cpu" #is this needed?
-
-#———————————————————— infer from SD ———————————————
-
-"""
-word_list_dataset = load_dataset("stabilityai/word-list", data_files="list.txt", use_auth_token=True)
-word_list = word_list_dataset["train"]['text']
-
-is_gpu_busy = False
-def infer(prompt, negative, scale):
- global is_gpu_busy
- for filter in word_list:
- if re.search(rf"\b{filter}\b", prompt):
- raise gr.Error("Unsafe content found. Please try again with different prompts.")
-
- images = []
- url = os.getenv('JAX_BACKEND_URL')
- payload = {'prompt': prompt, 'negative_prompt': negative, 'guidance_scale': scale}
- images_request = requests.post(url, json = payload)
- for image in images_request.json()["images"]:
- image_b64 = (f"data:image/jpeg;base64,{image}")
- images.append(image_b64)
-
- return images
- """
-
-
-#————————————————————5.1 Gradio Interface ———————————————
-
-#CSS defines the style of the interface
-css = """
- .gradio-container {
- font-family: 'IBM Plex Sans', sans-serif;
- }
- .gr-button {
- color: white;
- border-color: black;
- background: black;
- }
- input[type='range'] {
- accent-color: black;
- }
- .dark input[type='range'] {
- accent-color: #dfdfdf;
- }
- .container {
- max-width: 730px;
- margin: auto;
- padding-top: 1.5rem;
- }
-
- .details:hover {
- text-decoration: underline;
- }
- .gr-button {
- white-space: nowrap;
- }
- .gr-button:focus {
- border-color: rgb(147 197 253 / var(--tw-border-opacity));
- outline: none;
- box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
- --tw-border-opacity: 1;
- --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
- --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
- --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
- --tw-ring-opacity: .5;
- }
- .footer {
- margin-bottom: 45px;
- margin-top: 35px;
- text-align: center;
- border-bottom: 1px solid #e5e5e5;
- }
- .footer>p {
- font-size: .8rem;
- display: inline-block;
- padding: 0 10px;
- transform: translateY(10px);
-            background: #2596be; /* changed from white */
- }
- .dark .footer {
- border-color: #303030;
-
- }
- .dark .footer>p {
-            background: #2596be; /* changed from #0b0f19 */
- }
- .prompt h4{
- margin: 1.25em 0 .25em 0;
- font-weight: bold;
- font-size: 115%;
- }
- .animate-spin {
- animation: spin 1s linear infinite;
- }
- @keyframes spin {
- from {
- transform: rotate(0deg);
- }
- to {
- transform: rotate(360deg);
- }
- }
- #share-btn-container {
- display: flex; margin-top: 1.5rem !important; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
- }
- #share-btn {
- all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
- }
- #share-btn * {
- all: unset;
- }
-"""
-
-#creates the interface object with the style outlined above
-block = gr.Blocks(css=css)
-
-#HTML defines the layout of the interface
-with block:
- gr.HTML(
- """
-
-
-
-
- Hallucinate
-
-
-
- Instantly produce cinematics for your audio.
- Create unique Spotify Canvases for all your tracks.
-
-
- """
- )
-
- #within the group
- with gr.Group():
-
- #first create a box
- with gr.Box():
-
- #in the box create a row
- with gr.Row().style(mobile_collapse=False, equal_height=True):
-
- #in the row add a video input (left) #UPDATE THIS
- image_input = gr.Image(
- label="Initialize with image",
- show_label=False,
- source="upload", #"microphone"
- type="filepath"
- )
-
- #in the row add a button to run the model (right) #UPDATE THIS
- btn = gr.Button("Hallucinate")
-
- #add an output field to the box #UPDATE THIS
- video_output = gr.Video(show_label=False, elem_id="result-textarea")
-
- #add share functions
- with gr.Group(elem_id="share-btn-container"):
- community_icon = gr.HTML(community_icon_html, visible=False)
- loading_icon = gr.HTML(loading_icon_html, visible=False)
- share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
-
-
- #add button functions
-
- #the input button is here
-
- #btn.click(render_input_video, inputs=[video_input], outputs=[video_output, community_icon, loading_icon, share_button])
- #share_button.click(None, [], [], _js=share_js)
-
- #create footer
- gr.HTML(
- """
-
- """
- )
- with gr.Accordion(label="License", open=False):
- gr.HTML(
- """
-
- LICENSE
-The model is licensed with a CreativeML OpenRAIL++ license. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, causes harm to a person, disseminates personal information intended to harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read the license.
-
- Biases and content acknowledgment
-Despite how impressive turning text into images is, be aware that this model may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography and violence. The model was trained on the LAION-5B dataset, which scraped non-curated image-text pairs from the internet (with illegal content removed) and is meant for research purposes. You can read more in the model card.
-
- """
- )
-
-#launch
-#block.launch()#share=True)
-
-
-
-
-
-
-#———————————————————— 1.4. Path Setup (input, output, drive)————————————————————————
-
-#this creates FileNotFoundError: [Errno 2] No such file or directory:
-#/home/user/app/configs/v1-inference.yaml - is this a github link?
-#even though the yaml file is uploaded to files. yaml file should be available through DSD import?
-
-def Root():
- models_path = "models" #@param {type:"string"}
- configs_path = "configs" #@param {type:"string"}
- output_path = "outputs" #@param {type:"string"}
- mount_google_drive = True #@param {type:"boolean"}
- models_path_gdrive = "/content/drive/MyDrive/AI/models" #@param {type:"string"}
- output_path_gdrive = "/content/drive/MyDrive/AI/StableDiffusion" #@param {type:"string"}
-
- #@markdown **Model Setup**
- map_location = "cuda" #@param ["cpu", "cuda"]
- model_config = "v1-inference.yaml" #@param ["custom","v2-inference.yaml","v2-inference-v.yaml","v1-inference.yaml"]
- model_checkpoint = "Protogen_V2.2.ckpt" #@param ["custom","v2-1_768-ema-pruned.ckpt","v2-1_512-ema-pruned.ckpt","768-v-ema.ckpt","512-base-ema.ckpt","Protogen_V2.2.ckpt","v1-5-pruned.ckpt","v1-5-pruned-emaonly.ckpt","sd-v1-4-full-ema.ckpt","sd-v1-4.ckpt","sd-v1-3-full-ema.ckpt","sd-v1-3.ckpt","sd-v1-2-full-ema.ckpt","sd-v1-2.ckpt","sd-v1-1-full-ema.ckpt","sd-v1-1.ckpt", "robo-diffusion-v1.ckpt","wd-v1-3-float16.ckpt"]
- custom_config_path = "" #"https://github.com/realhallucinate/deforum-stable-diffusion-gradioUI/blob/main/configs/v1-inference.yaml"# replaced just an empty string: "" with a diret link #@param {type:"string"}
- custom_checkpoint_path = "" #@param {type:"string"}
- return locals()
-
-root = Root()
-root = SimpleNamespace(**root)
-
-root.models_path, root.output_path = get_model_output_paths(root)
-root.model, root.device = load_model(root, load_on_run_all=True, check_sha256=True, map_location=root.map_location)
-#root.model, root.device = load_model(root, load_on_run_all=True, check_sha256=True, map_location=torch.device('cpu'))
-
-
-#——————————————————— 2.1. Prompt Base ———————————————————————— #----------------CONSIDER THIS FOR INPUT/LOCK---------------
-
-#need to update the prompt base
-
-medium = {"pixar image | matte painting | 3D render | oil painting | photograph | sculpture |digital illustration | watercolor | colored pencil sketch | photo shoot | acrylic painting"}
-description = {"silly | sexy | golden | exotic | beautiful | elegant | creepy | hilarious | evil | Angelic | sublime | ridiculous"}
-subject = {"rococo cauliflower headdress | cauliflower cornucopia | macro romanesco | cauliflower Cthulhu | cauliflower nuclear explosion | Cauliflower mushroom cloud | Hubble cauliflower nebula | cauliflower infestation | steampunk cauliflower | magic rubber cauliflower | psychedelic cauliflower | cauliflower couture"}
-details = {"flowers | ornaments | pearls | raindrops | glasses"}
-artist = {"[3$$ James Jean | Lucian Freud | tomm moore | wes anderson | ernst haeckl | tim burton | jean pierre jeunet | jean giraud moebius | takashi murakami | ross draws | artgerm | alvin ailey | Zdzisław Beksiński | Arthur Rackham | Dariusz Zawadzki | thomas kincade | greg rutkowski | rembrandt | HR Giger | jama jurabaev | wenwei zhu | frank franzzeta | mcbess | sakimi chan | brosmind | steve simpson | jim henson | Nicoletta Ceccoli | Margaret Keane | Mark Ryden | Severin Krøyer | Marie Krøyer | Karl Madsen| Laurits Tuxen| Carl Locher| Viggo Johansen| Thorvald Niss | Holger Drachmann | Raluca bararu | walton ford | josh Keyes | Marco Mazzoni | Susan Helen Strok | R Crumb | Beatrix potter | shaw brothers | victor moscoso | Thomas Kinkade | Vincent Van Gogh | Leonid Afremov | Claude Monet | Edward Hopper | Norman Rockwell | William-Adolphe Bouguereau | Albert Bierstadt | John Singer Sargent | Pierre-Auguste Renoir | Frida Kahlo | John William Waterhouse | Winslow Homer | Walt Disney | Thomas Moran | Phil Koch | Paul Cézanne | Camille Pissarro | Erin Hanson | Thomas Cole | Raphael | Steve Henderson | Pablo Picasso | Caspar David Friedrich | Ansel Adams | Diego Rivera | Steve McCurry | Bob Ross | John Atkinson Grimshaw | Rob Gonsalves | Paul Gauguin | James Tissot | Edouard Manet | Alphonse Mucha | Alfred Sisley | Fabian Perez | Gustave Courbet | Zaha Hadid | Jean-Léon Gérôme | Carl Larsson | Mary Cassatt | Sandro Botticelli | Daniel Ridgway Knight | Joaquín Sorolla | Andy Warhol | Kehinde Wiley | Alfred Eisenstaedt | Gustav Klimt | Dante Gabriel Rossetti | Tom Thomson | Edgar Degas | Utagawa Hiroshige | Camille Corot | Edward Steichen | David Hockney | Ivan Aivazovsky | Josephine Wall | Peter Paul Rubens | Henri Rousseau | Edward Burne-Jones | Pixar | Alexander McQueen | Anders Zorn | Jean Auguste Dominique Ingres | Franz Xaver Winterhalter | Katsushika Hokusai | John Constable | Canaletto | Shepard Fairey | Gordon Parks | George Inness | Anthony van Dyck | Vivian Maier | Catrin Welz-Stein | Lawren Harris | Salvador Dali | David Bowie | Agnes Cecile | Titian | Martin Johnson Heade | Scott Naismith | William Morris | Berthe Morisot | Vladimir Kush | William Holman Hunt | Edvard Munch | Joseph Mallord William Turner | Gustave Doré | Thomas Eakins | Ilya Repin | Amedeo Modigliani | Johannes Vermeer | Eyvind EarleIvan Shishkin | Rembrandt Van Rijn | Gil Elvgren | Nicholas Roerich | Henri Matisse | Thomas Gainsborough | Artgerm | Studio Ghibli | Grant Wood | Jeremy Mann | Mark Keathley | Maxfield Parrish | Andrew Wyeth | RHADS | David Lynch | Frederic Remington | Jan Van Eyck | Mikko Lagerstedt | Banksy | Michael Cheval | Anna Razumovskaya | Jean-François Millet | Thomas W Schaller | Charlie Bowater | El Greco | Paolo Roversi | Carne Griffiths | Man Ray | August Sander | Andrew Macara | Evelyn De Morgan | William Blake | Sally Mann | Oleg Oprisco | Yuumei | Helmut Newton | Henry Ossawa Tanner | Asher Brown Durand | teamLab | August Macke | Armand Guillaumin | Terry Redlin | Antoine Blanchard | Anna Ancher | Ohara Koson | Walter Langley | Yayoi Kusama | Stan Lee | Chuck Close | Albert Edelfelt | Mark Seliger | Eugene Delacroix | John Lavery | Theo van Rysselberghe | Marc Chagall | Rolf Armstrong | Brent Heighton | A.J.Casson | Egon Schiele | Maximilien Luce | Georges Seurat | George Frederic Watts | Arthur Hughes | Anton Mauve | Lucian Freud | Jessie Willcox Smith | Leonardo Da Vinci | Edward John Poynter | Brooke Shaden | J.M.W. 
Turner | Wassily Kandinsky | Wes Anderson | Jean-Honoré Fragonard | Amanda Clark | Tom Roberts | Antonello da Messina | Makoto Shinkai | Hayao Miyazaki | Slim Aarons | Alfred Stevens | Albert Lynch | Andre Kohn | Daniel Garber | Jacek Yerka | Beatrix Potter | Rene Magritte | Georgia O'Keeffe | Isaac Levitan | Frank Lloyd Wright | Gustave Moreau | Ford Madox Brown | Ai Weiwei | Tim Burton | Alfred Cheney Johnston | Duy Huynh | Michael Parkes | Tintoretto | Archibald Thorburn | Audrey Kawasaki | George Lucas | Arthur Streeton | Albrecht Durer | Andrea Kowch | Dorina Costras | Alex Ross | Hasui Kawase | Lucas Cranach the Elder | Briton Rivière | Antonio Mora | Mandy Disher | Henri-Edmond Cross | Auguste Toulmouche | Hubert Robert | Syd Mead | Carl Spitzweg | Alyssa Monks | Edward Lear | Ralph McQuarrie | Sailor Moon | Simon Stalenhag | Edward Robert Hughes | Jules Bastien-Lepage | Richard S. Johnson | Rockwell Kent | Sparth | Arnold Böcklin | Lovis Corinth | Arnold Bocklin | Robert Hagan | Gregory Crewdson | Thomas Benjamin Kennington | Abbott Handerson Thayer | Gilbert Stuart | Louis Comfort Tiffany | Raphael Lacoste | Jean Marc Nattier | Janek Sedlar | Sherree Valentine Daines | Alexander Jansson | James Turrell | Alex Grey | Henri De Toulouse Lautrec | Anton Pieck | Ramon Casas | Andrew Atroshenko | Andy Kehoe | Andreas Achenbach | H.P. Lovecraft | Eric Zener | Kunisada | Jimmy Lawlor | Quentin Tarantino | Marianne North | Vivienne Westwood | Tom Bagshaw | Jeremy Lipking | John Martin | Cindy Sherman | Scott Listfield | Alexandre Cabanel | Arthur Rackham | Arthur Hacker | Henri Fantin Latour | Mark Ryden | Peter Holme IIIT | ed Nasmith | Bill Gekas | Paul Strand | Anne Stokes | David Teniers the Younger | Alan Lee | Ed Freeman | Andrey Remnev | Alasdair McLellan | Botero | Vittorio Matteo Corcos | Ed Mell | Worthington Whittredge | Jakub Różalski | Alex Gross | Edward Weston | Ilya Kuvshinov | Francisco De Goya | Balthus | J.C. Leyendecker | Nathan Wirth]"}
-realism = {"[4$$ highly detailed | photorealistic | realistic | hypermaximalist | hyperrealism, intricate | HD | 8k | 4k | octane render | subsurface scattering raytracing | depth of field | bokeh | redshift render | abstract illusionism | Atmospheric | Dramatic lighting | Anthropomorphic | 8k | Very detailed | Cinematic lighting | Unreal engine | Octane render | Photorealistic | Hyperrealistic | Sharp focus | Rim lighting | Soft lighting | Volumetric | Surreal | Realistic CGI | Fantastic backlight | HDR | Studio light | Internal glow | Iridescent | Cyberpunk | Steampunk | Intricate | filigree | Bionic futurism | Ray tracing | Symmetrical | Atompunk | Multiverse | Concept art | Time loop | Maximum texture | Futurism | Dynamic | retrowave | afrofuturism | intricate and highly detailed | intricate and highly detailed | intricate and highly detailed | intricate and highly detailed | intricate and highly detailed | photorealistic | photorealistic | photorealistic | photorealistic]"}
-repository = {"Artstation"}
-#setting = {"corporate office setting | abandoned warehouse | schoolhouse | victorian train station | victorian room | Lake | Field of wild flowers | submarine | tennis court | mcdonalds | swamp | assembly line | surface of the moon | museum | outer space |storefront display"}
-#time = {"morning | noon | night | evening | dawn"}
-
-#'animation_mode: None' (default) batches on this list of 'prompts'
-prompts = [
- f"A beautiful {medium} of a {description}{subject} with {details}, in the style of {artist}. {realism} design, trending on {repository}"
-]
-
-#——————————————————— 2.2. Prompt Template Builder ————————————————————————
-
-#a function to select a set of prompts
-
-# Define the `pick_variant` function that takes in a template string
-def pick_variant(template):
- # If the template is None, return None
- if template is None:
- return None
-
- # Set `out` to be the template string
- out = template
-
- # Use a regular expression to find all instances of `{...}` in the template
- # The regular expression `r'\{[^{}]*?}'` searches for all sequences of characters
- # surrounded by curly braces that do not contain other curly braces.
- variants = re.findall(r'\{[^{}]*?}', out)
-
- # Iterate over each variant found in the template
- for v in variants:
- # Split the variant string by the vertical bar (|)
- opts = v.strip("{}").split('|')
- # Replace the variant in the `out` string with a random option
- out = out.replace(v, random.choice(opts))
-
- # Use a regular expression to find all instances of `[...]` in the template
- # The regular expression `r'\[[^\[\]]*?]'` searches for all sequences of characters
- # surrounded by square brackets that do not contain other square brackets.
- combinations = re.findall(r'\[[^\[\]]*?]', out)
-
- # Iterate over each combination found in the template
- for c in combinations:
- # Remove the square brackets from the combination
- sc = c.strip("[]")
- # Split the combination string by '$$'
- parts = sc.split('$$')
- # Initialize the number of options to pick to `None`
- n_pick = None
-
- # If there are more than 2 parts, raise an error
- if len(parts) > 2:
- raise ValueError(" we do not support more than 1 $$ in a combination")
- # If there are 2 parts, set the number of options to pick to the first part
- if len(parts) == 2:
- sc = parts[1]
- n_pick = int(parts[0])
- # Split the combination string by the vertical bar (|)
- opts = sc.split('|')
- # If the number of options to pick is not set, set it to a random integer between 1 and the number of options
- if not n_pick:
- n_pick = random.randint(1,len(opts))
-
- # Sample `n_pick` options from the options list
- sample = random.sample(opts, n_pick)
- # Replace the combination in the `out` string with a comma-separated string of the picked options
- out = out.replace(c, ', '.join(sample))
-
- # If there were any variants or combinations found in the template, call `pick_variant` again with the new `out` string
- if len(variants+combinations) > 0:
- return pick_variant(out)
- # If there were no variants or combinations found, return the final `out` string
- return out
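# For example (output varies per call, since the choices are random): a {a|b} group picks one
# option and a [2$$ a | b | c] group picks two options and joins them with a comma.
template = "A {red|green|blue} scene with [2$$ cats | dogs | foxes]"
print(pick_variant(template))   # e.g. "A blue scene with  dogs ,  foxes" (whitespace around options is preserved)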
-
-#'animation_mode: None' (default) batches on this list of 'prompts'
-for prompt in prompts:
- text_prompt = pick_variant(prompt)
-
-#print('Text prompt selected:', "\n")
-#print(text_prompt)
-
-#———————————————————— 2.3. Prompt Selector: Video ————————————————————————
-
-#create a string of frame intervals and prompts
-def prompt_intervals(prompts, framecount, stepsize):
-
- timesteps = []
- for frame in range(0, framecount):
- timesteps.append(frame * stepsize)
-
- animation_prompt = ""
- for timestep in timesteps:
- for prompt in prompts:
- p = pick_variant(prompt)
- animation_prompt += (str(timestep) + ": " + p + ", ")
-
- animation_prompts = str(animation_prompt)
- return animation_prompts
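# The result is a single string of "frame: prompt" pairs, which is the form the 2D/3D animation
# renderer consumes. A tiny illustrative call:
demo = prompt_intervals(["a {red|blue} sky"], framecount=3, stepsize=25)
print(demo)   # e.g. "0: a red sky, 25: a blue sky, 50: a red sky, " (note the trailing separator)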
-
-#@markdown Here you can select framecount and stepsize and create a selection of animation prompts
-
-#MAKE INTERACTABLE IN DEMO? OR HAVE FIXED 8 FPS?
-framecount = 8 #fps #----------------CONSIDER THIS FOR INPUT/LOCK---------------
-stepsize = 25 #time interval between prompts #----------------CONSIDER THIS FOR INPUT/LOCK---------------
-
-#'animation_mode: 2D' works with this list of 'animation_prompts'
-animation_prompts = prompt_intervals(prompts, framecount, stepsize)
-
-#print('Animation prompts selected:',"\n")
-#print(animation_prompts)
-
-
-#———————————————————— 3.1. Video Settings————————————————————————
- #HARDCODE THE DIFFERENT CAMERA SETTINGS HERE LATER?
-
-#This function only sets and outputs the arguments for the ANIMATION process
-def DeforumAnimArgs():
-
- #ANIMATION_MODE IS A KEY ARG!
-
- #@markdown ####**Animation:**
- animation_mode = '3D' #@param ['None', '2D', '3D', 'Video Input', 'Interpolation'] {type:'string'} #THIS IS A KEY ARG!
- max_frames = 1000 #@param {type:"number"}
- border = 'replicate' #@param ['wrap', 'replicate'] {type:'string'}
-
- #@markdown ####**Motion Parameters:**
- angle = "0:(0)"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- zoom = "0:(1.04)"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- translation_x = "0:(10*sin(2*3.14*t/10))"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- translation_y = "0:(0)"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- translation_z = "0:(10)"#@param {type:"string"}#----------------CONSIDER THIS FOR INPUT/LOCK---------------
- rotation_3d_x = "0:(0)"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- rotation_3d_y = "0:(0)"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- rotation_3d_z = "0:(0)"#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- flip_2d_perspective = False #@param {type:"boolean"}
- perspective_flip_theta = "0:(0)"#@param {type:"string"}
- perspective_flip_phi = "0:(t%15)"#@param {type:"string"}
- perspective_flip_gamma = "0:(0)"#@param {type:"string"}
- perspective_flip_fv = "0:(53)"#@param {type:"string"}
- noise_schedule = "0: (0.02)"#@param {type:"string"}
- strength_schedule = "0: (0.65)"#@param {type:"string"}
- contrast_schedule = "0: (1.0)"#@param {type:"string"}
- hybrid_video_comp_alpha_schedule = "0:(1)" #@param {type:"string"}
- hybrid_video_comp_mask_blend_alpha_schedule = "0:(0.5)" #@param {type:"string"}
- hybrid_video_comp_mask_contrast_schedule = "0:(1)" #@param {type:"string"}
- hybrid_video_comp_mask_auto_contrast_cutoff_high_schedule = "0:(100)" #@param {type:"string"}
- hybrid_video_comp_mask_auto_contrast_cutoff_low_schedule = "0:(0)" #@param {type:"string"}
-
- #@markdown ####**Unsharp mask (anti-blur) Parameters:**
- kernel_schedule = "0: (5)"#@param {type:"string"}
- sigma_schedule = "0: (1.0)"#@param {type:"string"}
- amount_schedule = "0: (0.2)"#@param {type:"string"}
- threshold_schedule = "0: (0.0)"#@param {type:"string"}
-
- #@markdown ####**Coherence:**
- color_coherence = 'Match Frame 0 LAB' #@param ['None', 'Match Frame 0 HSV', 'Match Frame 0 LAB', 'Match Frame 0 RGB', 'Video Input'] {type:'string'}
- color_coherence_video_every_N_frames = 1 #@param {type:"integer"}
- diffusion_cadence = '1' #@param ['1','2','3','4','5','6','7','8'] {type:'string'}
-
- #@markdown ####**3D Depth Warping:**
- use_depth_warping = True #@param {type:"boolean"}
- midas_weight = 0.3#@param {type:"number"}
- near_plane = 200
- far_plane = 10000
- fov = 40#@param {type:"number"}
- padding_mode = 'border'#@param ['border', 'reflection', 'zeros'] {type:'string'}
- sampling_mode = 'bicubic'#@param ['bicubic', 'bilinear', 'nearest'] {type:'string'}
- save_depth_maps = False #@param {type:"boolean"}
-
-#video input here
-
- #@markdown ####**Video Input:**
- video_init_path = image_input #'/content/video_in.mp4'#@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- extract_nth_frame = 1#@param {type:"number"}
- overwrite_extracted_frames = True #@param {type:"boolean"}
- use_mask_video = False #@param {type:"boolean"}
- video_mask_path ='/content/video_in.mp4'#@param {type:"string"}
-
- #@markdown ####**Hybrid Video for 2D/3D Animation Mode:**
- hybrid_video_generate_inputframes = False #@param {type:"boolean"}
- hybrid_video_use_first_frame_as_init_image = True #@param {type:"boolean"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- hybrid_video_motion = "None" #@param ['None','Optical Flow','Perspective','Affine']
- hybrid_video_flow_method = "Farneback" #@param ['Farneback','DenseRLOF','SF']
- hybrid_video_composite = False #@param {type:"boolean"}
- hybrid_video_comp_mask_type = "None" #@param ['None', 'Depth', 'Video Depth', 'Blend', 'Difference']
- hybrid_video_comp_mask_inverse = False #@param {type:"boolean"}
- hybrid_video_comp_mask_equalize = "None" #@param ['None','Before','After','Both']
- hybrid_video_comp_mask_auto_contrast = False #@param {type:"boolean"}
- hybrid_video_comp_save_extra_frames = False #@param {type:"boolean"}
- hybrid_video_use_video_as_mse_image = False #@param {type:"boolean"}
-
- #@markdown ####**Interpolation:**
- interpolate_key_frames = False #@param {type:"boolean"}
- interpolate_x_frames = 4 #@param {type:"number"}
-
- #@markdown ####**Resume Animation:**
- resume_from_timestring = False #@param {type:"boolean"}
- resume_timestring = "20220829210106" #@param {type:"string"}
-
- return locals()
-
-
-#———————————————————— 4.1. Run (create and return images)————————————————————————
-
-#@markdown **Load Settings**
-override_settings_with_file = False #@param {type:"boolean"}
-settings_file = "custom" #@param ["custom", "512x512_aesthetic_0.json","512x512_aesthetic_1.json","512x512_colormatch_0.json","512x512_colormatch_1.json","512x512_colormatch_2.json","512x512_colormatch_3.json"]
-custom_settings_file = "/content/drive/MyDrive/Settings.txt"#@param {type:"string"}
-
-#This function only sets and outputs the arguments for the INFERENCE process
-def DeforumArgs():
- #@markdown **Image Settings**
- W = 540 #@param
- H = 540 #@param
- W, H = map(lambda x: x - x % 64, (W, H)) # resize to integer multiple of 64
- bit_depth_output = 8 #@param [8, 16, 32] {type:"raw"}
-
- #@markdown **Sampling Settings**
- seed = 1 #@param
- sampler = 'euler_ancestral' #@param ["klms","dpm2","dpm2_ancestral","heun","euler","euler_ancestral","plms", "ddim", "dpm_fast", "dpm_adaptive", "dpmpp_2s_a", "dpmpp_2m"]
- steps = 25 #@param #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- scale = 7 #@param
- ddim_eta = 0.0 #@param
- dynamic_threshold = None
- static_threshold = None
-
- #@markdown **Save & Display Settings**
- save_samples = False #@param {type:"boolean"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- save_settings = False #@param {type:"boolean"}
- display_samples = False #@param {type:"boolean"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- save_sample_per_step = False #@param {type:"boolean"}
- show_sample_per_step = False #@param {type:"boolean"}
-
- #@markdown **Prompt Settings**
- prompt_weighting = True #@param {type:"boolean"}
- normalize_prompt_weights = True #@param {type:"boolean"}
- log_weighted_subprompts = False #@param {type:"boolean"}
-
- #@markdown **Batch Settings**
- n_batch = 12 #@param #----------------CONSIDER THIS FOR INPUT/LOCK---------------
-
- batch_name = "HuggingTest1" #@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- filename_format = "{timestring}_{index}_{prompt}.png" #@param ["{timestring}_{index}_{seed}.png","{timestring}_{index}_{prompt}.png"]
- seed_behavior = "iter" #@param ["iter","fixed","random","ladder","alternate"] #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- seed_iter_N = 1 #@param {type:'integer'} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- make_grid = False #@param {type:"boolean"}
- grid_rows = 2 #@param
- outdir = get_output_folder(root.output_path, batch_name) #----------------CONSIDER THIS FOR INPUT/LOCK---------------
-
- #@markdown **Init Settings**
- use_init = False #@param {type:"boolean"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- strength = 0.75 #@param {type:"number"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- strength_0_no_init = True # Set the strength to 0 automatically when no init image is used
- init_image = "/content/drive/MyDrive/AI/init_images/Hallucinate.png" #@param {type:"string"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- # Whiter areas of the mask are areas that change more
- use_mask = False #@param {type:"boolean"} #----------------CONSIDER THIS FOR INPUT/LOCK---------------
- use_alpha_as_mask = False # use the alpha channel of the init image as the mask
- mask_file = "https://www.filterforge.com/wiki/images/archive/b/b7/20080927223728%21Polygonal_gradient_thumb.jpg" #@param {type:"string"}
- invert_mask = False #@param {type:"boolean"}
- # Adjust mask image, 1.0 is no adjustment. Should be positive numbers.
- mask_brightness_adjust = 1.0 #@param {type:"number"}
- mask_contrast_adjust = 1.0 #@param {type:"number"}
- # Overlay the masked image at the end of the generation so it does not get degraded by encoding and decoding
- overlay_mask = True # {type:"boolean"}
- # Blur edges of final overlay mask, if used. Minimum = 0 (no blur)
- mask_overlay_blur = 5 # {type:"number"}
-
- #@markdown **Exposure/Contrast Conditional Settings**
- mean_scale = 0 #@param {type:"number"}
- var_scale = 0 #@param {type:"number"}
- exposure_scale = 0 #@param {type:"number"}
- exposure_target = 0.5 #@param {type:"number"}
-
- #@markdown **Color Match Conditional Settings**
- colormatch_scale = 0 #@param {type:"number"}
- colormatch_image = "https://www.saasdesign.io/wp-content/uploads/2021/02/palette-3-min-980x588.png" #@param {type:"string"}
- colormatch_n_colors = 4 #@param {type:"number"}
- ignore_sat_weight = 0 #@param {type:"number"}
-
- #@markdown **CLIP\Aesthetics Conditional Settings**
- clip_name = 'ViT-L/14' #@param ['ViT-L/14', 'ViT-L/14@336px', 'ViT-B/16', 'ViT-B/32']
- clip_scale = 0 #@param {type:"number"}
- aesthetics_scale = 0 #@param {type:"number"}
- cutn = 1 #@param {type:"number"}
- cut_pow = 0.0001 #@param {type:"number"}
-
- #@markdown **Other Conditional Settings**
- init_mse_scale = 0 #@param {type:"number"}
- init_mse_image = "https://cdn.pixabay.com/photo/2022/07/30/13/10/green-longhorn-beetle-7353749_1280.jpg" #@param {type:"string"}
-
- blue_scale = 0 #@param {type:"number"}
-
- #@markdown **Conditional Gradient Settings**
- gradient_wrt = 'x0_pred' #@param ["x", "x0_pred"]
- gradient_add_to = 'both' #@param ["cond", "uncond", "both"]
- decode_method = 'linear' #@param ["autoencoder","linear"]
- grad_threshold_type = 'dynamic' #@param ["dynamic", "static", "mean", "schedule"]
- clamp_grad_threshold = 0.2 #@param {type:"number"}
- clamp_start = 0.2 #@param
- clamp_stop = 0.01 #@param
- grad_inject_timing = list(range(1,10)) #@param
-
- #@markdown **Speed vs VRAM Settings**
- cond_uncond_sync = True #@param {type:"boolean"}
-
- n_samples = 1 # doesnt do anything
- precision = 'autocast'
- C = 4
- f = 8
-
- prompt = ""
- timestring = ""
- init_latent = None
- init_sample = None
- init_sample_raw = None
- mask_sample = None
- init_c = None
- seed_internal = 0
-
- return locals()
-
-#This segment prepares arguments and adjusts settings
-
-# Define default arguments for the program
-args_dict = DeforumArgs()
-anim_args_dict = DeforumAnimArgs()
-
-# Override default arguments with values from settings file, if specified
-if override_settings_with_file:
- load_args(args_dict, anim_args_dict, settings_file, custom_settings_file, verbose=False)
-
-# Create SimpleNamespace objects for arguments and animation arguments
-args = SimpleNamespace(**args_dict)
-anim_args = SimpleNamespace(**anim_args_dict)
-
-# Set timestring to current time in YYYYMMDDHHMMSS format
-args.timestring = time.strftime('%Y%m%d%H%M%S')
-# Ensure strength is within valid range of 0.0 to 1.0
-args.strength = max(0.0, min(1.0, args.strength))
-
-# Load clip model if using clip guidance or aesthetics model if aesthetics_scale is > 0
-if (args.clip_scale > 0) or (args.aesthetics_scale > 0):
- # Load clip model and set to evaluation mode without requiring gradient
- root.clip_model = clip.load(args.clip_name, jit=False)[0].eval().requires_grad_(False).to(root.device)
- if (args.aesthetics_scale > 0):
- # Load aesthetics model if aesthetics_scale is > 0
- root.aesthetics_model = load_aesthetics_model(args, root)
-
-# Set seed to a random number if not specified
-if args.seed == -1:
- args.seed = random.randint(0, 2**32 - 1)
-
-# If not using init image, set init_image to None
-if not args.use_init:
- args.init_image = None
-
-# If using plms sampler with init image or animation mode isn't None, switch to klms sampler
-if args.sampler == 'plms' and (args.use_init or anim_args.animation_mode != 'None'):
- print(f"Init images aren't supported with PLMS yet, switching to KLMS")
- args.sampler = 'klms'
-
-# If not using ddim sampler, set ddim_eta to 0
-if args.sampler != 'ddim':
- args.ddim_eta = 0
-
-# Set max_frames to 1 if animation mode is None, or use_init to True if animation mode is Video Input
-if anim_args.animation_mode == 'None':
- anim_args.max_frames = 1
-elif anim_args.animation_mode == 'Video Input':
- args.use_init = True
-
-# Clean up unused memory and empty CUDA cache
-gc.collect()
-torch.cuda.empty_cache()
-
-# Dispatch to appropriate renderer based on animation mode
-#These are probably imported from stable diffusion
-
-#lets try to place it within an infer function
-def infer(args, anim_args, animation_prompts, root, prompts):
- #render animation (the main one)
- if anim_args.animation_mode == '2D' or anim_args.animation_mode == '3D':
- render_animation(args, anim_args, animation_prompts, root)
-
- #render input video
- elif anim_args.animation_mode == 'Video Input':
- render_input_video(args, anim_args, animation_prompts, root)
-
- #render interpolation
- elif anim_args.animation_mode == 'Interpolation':
- render_interpolation(args, anim_args, animation_prompts, root)
-
- #render image batch
- else:
- render_image_batch(args, prompts, root)
-
-infer(args, anim_args, animation_prompts, root, prompts)
-
-#———————————————————— 4.2. Launch? ————————————————————————
-with block:
-    with gr.Group():
-        btn.click(infer, inputs=[image_input], outputs=[video_output, community_icon, loading_icon, share_button])
-
-# wire the click handler first, then launch once; queue settings from SD to balance/limit requests
-block.queue(concurrency_count=80, max_size=100).launch(max_threads=150) #share=True
-
-
-
-#———————————————————— 4.2. Create Videos from Frames ————————————————————————
-
-#set FPS / video speed
-#skip_video_for_run_all = False #@param {type: 'boolean'}
-fps = 12 #@param {type:"number"} #HARDCODED FPS HERE: CONSIDER GIVING OPTION TO USERS
-
-#manual settings for paths
-
-#@markdown **Manual Settings**
-use_manual_settings = False #@param {type:"boolean"} #MOD THIS?
-image_path = "/content/drive/MyDrive/AI/StableDiffusion/2023-02/Test14/0_%05d.png" #@param {type:"string"} #MOD THIS?
-mp4_path = "/content/drive/MyDrive/AI/StableDiffusion/2023-02/Test14/0_%05d.mp4" #@param {type:"string"} #MOD THIS?
-render_steps = False #@param {type: 'boolean'}
-path_name_modifier = "x0_pred" #@param ["x0_pred","x"]
-make_gif = False
-bitdepth_extension = "exr" if args.bit_depth_output == 32 else "png"
-
-# render steps from a single image
-if render_steps:
-
- # get file name and directory of latest output directory
- fname = f"{path_name_modifier}_%05d.png" #MOD THIS?
- all_step_dirs = [os.path.join(args.outdir, d) for d in os.listdir(args.outdir) if os.path.isdir(os.path.join(args.outdir,d))]
- newest_dir = max(all_step_dirs, key=os.path.getmtime)
-
- # create image and video paths
- image_path = os.path.join(newest_dir, fname)
- mp4_path = os.path.join(newest_dir, f"{args.timestring}_{path_name_modifier}.mp4")
- max_frames = str(args.steps)
-
-# render images for a video
-else:
- # create image and video paths with timestamp and bit depth extension
- image_path = os.path.join(args.outdir, f"{args.timestring}_%05d.{bitdepth_extension}")
- mp4_path = os.path.join(args.outdir, f"{args.timestring}.mp4")
- max_frames = str(anim_args.max_frames)
-
-#-------
-
-# make video
-# create a list with the command and its parameters to call ffmpeg to encode images into an mp4 video
-cmd = [
- 'ffmpeg', # specify the name of the executable command
- '-y', # overwrite output files without asking
-    '-vcodec', bitdepth_extension, # force the input decoder to match the frame format (png or exr)
- '-r', str(fps), # specify the frames per second (fps) of the output video
- '-start_number', str(0), # specify the starting number of the image sequence
- '-i', image_path, # specify the input image sequence (using format specifier)
- '-frames:v', max_frames, # specify the number of frames to be encoded
- '-c:v', 'libx264', # specify the video codec to be used for encoding
- '-vf',
- f'fps={fps}', # specify the fps of the output video again
- '-pix_fmt', 'yuv420p', # specify the pixel format of the output video
- '-crf', '17', # specify the constant rate factor (crf) for video quality
- '-preset', 'veryfast', # specify the encoding speed preset
- '-pattern_type', 'sequence', # specify the type of pattern used for input file names
- mp4_path # specify the output mp4 video file path and name
-]
-
-# call the ffmpeg command using subprocess to encode images into an mp4 video
-process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-stdout, stderr = process.communicate()
-
-if process.returncode != 0: # if ffmpeg command execution returns non-zero code, indicating an error
- # print the error message and raise an exception
- #print(stderr)
- raise RuntimeError(stderr)
-
-
-#display video
-mp4 = open(mp4_path,'rb').read()
-#data_url = "data:video/mp4;base64," + b64encode(mp4).decode()
-#display.display(display.HTML(f'') )
-
-#make gif
-if make_gif:
- gif_path = os.path.splitext(mp4_path)[0]+'.gif'
- cmd_gif = [
- 'ffmpeg',
- '-y',
- '-i', mp4_path,
- '-r', str(fps),
- gif_path
- ]
- process_gif = subprocess.Popen(cmd_gif, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
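-    # Note (sketch): unlike the mp4 encode above, this GIF pass is not awaited.
-    # To surface ffmpeg errors here as well, the same pattern as above could be used:
-    #   _, gif_err = process_gif.communicate()
-    #   if process_gif.returncode != 0:
-    #       raise RuntimeError(gif_err)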
-
-
-#————————————————————5.1 Add Examples to interface ————————————————————————
-
-#Add examples: see line 158 and 307 at https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/main/app.py
-
-
-
-
-
-#—————————————————————————————
-
-
-#the function fn can be either
- #render_animation,
- #render_input_video(args, anim_args, animation_prompts, root),
- #render_interpolation,
- #render_image_batch
- #depending on animation_mode (268)
-
-#the output will be in the variable 'mp4' (or in 'mp4_path' , see 609)
-
-#—————————————————Launch Demo—————————————————————————————
-demo = gr.Interface(fn= infer, inputs=image_input, outputs=mp4, title=title, description=description, article=article)
-demo.launch()
-#demo.launch(auth = ("demo", "demo"), auth_message = "Enter username and password to access application")
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/adam.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/adam.py
deleted file mode 100644
index d3ae9e64a74774310adcd9968d2eae23368890f9..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/optim/adam.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import math
-from collections.abc import Collection
-from dataclasses import dataclass, field
-from typing import Any, List
-
-import torch
-import torch.distributed as dist
-import torch.optim
-from fairseq.dataclass import FairseqDataclass
-from fairseq.optim import FairseqOptimizer, register_optimizer
-from fairseq.optim.fused_adam import get_fused_adam_class
-from omegaconf import II, OmegaConf
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class FairseqAdamConfig(FairseqDataclass):
- adam_betas: Any = field(
- default=(0.9, 0.999), metadata={"help": "betas for Adam optimizer"}
- )
- adam_eps: float = field(
- default=1e-8, metadata={"help": "epsilon for Adam optimizer"}
- )
- weight_decay: float = field(default=0.0, metadata={"help": "weight decay"})
- use_old_adam: bool = field(
- default=False, metadata={"help": "Use fairseq.optim.adam.Adam"}
- )
- fp16_adam_stats: bool = field(
- default=False, metadata={"help": "use FP16 stats (with automatic scaling)"}
- )
- # TODO common vars below in parent
- tpu: bool = II("common.tpu")
- lr: List[float] = II("optimization.lr")
-
-
-@register_optimizer("adam", dataclass=FairseqAdamConfig)
-class FairseqAdam(FairseqOptimizer):
- """Adam optimizer for fairseq.
-
- Important note: this optimizer corresponds to the "AdamW" variant of
- Adam in its weight decay behavior. As such, it is most closely
- analogous to torch.optim.AdamW from PyTorch.
- """
-
- def __init__(self, cfg: FairseqAdamConfig, params):
- super().__init__(cfg)
- fused_adam_cls = get_fused_adam_class()
- use_fused_adam = (
- not getattr(cfg, "use_old_adam", False)
- and fused_adam_cls is not None
- and torch.cuda.is_available()
- )
- if getattr(cfg, "tpu", False):
- if self.cfg.fp16_adam_stats:
- raise NotImplementedError("--fp16-adam-stats is only supported on GPU")
- # on TPUs we use the Adam defined here, since it
- # automatically casts gradients to FP32
- self._optimizer = Adam(params, **self.optimizer_config)
- elif use_fused_adam:
- logger.info("using FusedAdam")
- self._optimizer = fused_adam_cls(
- params,
- use_fp16_stats=self.cfg.fp16_adam_stats,
- **self.optimizer_config
- )
- else:
- if self.cfg.fp16_adam_stats:
- raise NotImplementedError("--fp16-adam-stats is only supported with FusedAdamV1")
- self._optimizer = Adam(params, **self.optimizer_config)
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- """
- return {
- "lr": self.cfg.lr[0]
- if isinstance(self.cfg.lr, Collection)
- else self.cfg.lr,
- "betas": eval(self.cfg.adam_betas)
- if isinstance(self.cfg.adam_betas, str)
- else OmegaConf.to_container(self.cfg.adam_betas),
- "eps": self.cfg.adam_eps,
- "weight_decay": self.cfg.weight_decay,
- }
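-        # Illustrative example (assumed values): with cfg.lr == [5e-4],
-        # cfg.adam_betas == "(0.9, 0.98)", cfg.adam_eps == 1e-8 and
-        # cfg.weight_decay == 0.01, this returns
-        #   {"lr": 5e-4, "betas": (0.9, 0.98), "eps": 1e-8, "weight_decay": 0.01}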
-
- def average_params(self):
- """Reduce Params is only used during BMUF distributed training."""
- state_dict = self.optimizer.state_dict()
- total_gpus = float(dist.get_world_size())
-
- for _, value in state_dict["state"].items():
- value["exp_avg"] /= total_gpus
- value["exp_avg_sq"] /= total_gpus
- dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM)
- dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM)
-
-
-class Adam(torch.optim.Optimizer):
- r"""Implements Adam algorithm.
-
- This implementation is modified from torch.optim.Adam based on:
- `Fixed Weight Decay Regularization in Adam`
- (see https://arxiv.org/abs/1711.05101)
-
- It has been proposed in `Adam: A Method for Stochastic Optimization`_.
-
- Args:
- params (iterable): iterable of parameters to optimize or dicts defining
- parameter groups
- lr (float, optional): learning rate (default: 1e-3)
- betas (Tuple[float, float], optional): coefficients used for computing
- running averages of gradient and its square (default: (0.9, 0.999))
- eps (float, optional): term added to the denominator to improve
- numerical stability (default: 1e-8)
- weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
- amsgrad (boolean, optional): whether to use the AMSGrad variant of this
- algorithm from the paper `On the Convergence of Adam and Beyond`_
-
- .. _Adam\: A Method for Stochastic Optimization:
- https://arxiv.org/abs/1412.6980
- .. _On the Convergence of Adam and Beyond:
- https://openreview.net/forum?id=ryQu7f-RZ
- """
-
- def __init__(
- self,
- params,
- lr=1e-3,
- betas=(0.9, 0.999),
- eps=1e-8,
- weight_decay=0,
- amsgrad=False,
- ):
- defaults = dict(
- lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
- )
- super(Adam, self).__init__(params, defaults)
-
- @property
- def supports_memory_efficient_fp16(self):
- return True
-
- @property
- def supports_flat_params(self):
- return True
-
- def step(self, closure=None):
- """Performs a single optimization step.
-
- Args:
- closure (callable, optional): A closure that reevaluates the model
- and returns the loss.
- """
- loss = None
- if closure is not None:
- loss = closure()
-
- for group in self.param_groups:
- for p in group["params"]:
- if p.grad is None:
- continue
- grad = p.grad.data
- if grad.dtype in {torch.float16, torch.bfloat16}:
- grad = grad.float()
- if grad.is_sparse:
- raise RuntimeError(
- "Adam does not support sparse gradients, please consider SparseAdam instead"
- )
- amsgrad = group.get("amsgrad", False)
-
- p_data_fp32 = p.data
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p_data_fp32 = p_data_fp32.float()
-
- state = self.state[p]
-
- # State initialization
- if len(state) == 0:
- state["step"] = 0
- # Exponential moving average of gradient values
- state["exp_avg"] = torch.zeros_like(p_data_fp32)
- # Exponential moving average of squared gradient values
- state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
- if amsgrad:
- # Maintains max of all exp. moving avg. of sq. grad. values
- state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32)
- else:
- state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
- state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
- if amsgrad:
- state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to(
- p_data_fp32
- )
-
- exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
- if amsgrad:
- max_exp_avg_sq = state["max_exp_avg_sq"]
- beta1, beta2 = group["betas"]
-
- state["step"] += 1
-
- # Decay the first and second moment running average coefficient
- exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
- exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
- if amsgrad:
- # Maintains the maximum of all 2nd moment running avg. till now
- torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
- # Use the max. for normalizing running avg. of gradient
- denom = max_exp_avg_sq.sqrt().add_(group["eps"])
- else:
- denom = exp_avg_sq.sqrt().add_(group["eps"])
-
- bias_correction1 = 1 - beta1 ** state["step"]
- bias_correction2 = 1 - beta2 ** state["step"]
- step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1
-
- if group["weight_decay"] != 0:
- p_data_fp32.add_(
- p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
- )
-
- p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
-
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p.data.copy_(p_data_fp32)
-
- return loss
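-
-
-# --- Usage sketch (illustrative only, not part of fairseq's API) --------------
-# Minimal standalone check of the Adam class above; the parameter, lr and
-# weight_decay values are arbitrary example choices.
-if __name__ == "__main__":
-    w = torch.nn.Parameter(torch.zeros(3))
-    opt = Adam([w], lr=1e-3, weight_decay=1e-2)
-    loss = ((w - 1.0) ** 2).sum()  # simple quadratic objective
-    loss.backward()
-    opt.step()
-    print(w)  # w has moved slightly toward 1.0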
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/transliterate.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/transliterate.py
deleted file mode 100644
index de1ccab4426659552a019b593c4766522efff616..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/utils/inference/transliterate.py
+++ /dev/null
@@ -1,919 +0,0 @@
-import torch
-import torch.nn as nn
-import numpy as np
-import pandas as pd
-import random
-import sys
-import os
-import json
-import enum
-import traceback
-import re
-
-#F_DIR = os.path.dirname(os.path.realpath(__file__))
-F_DIR = '/home/user/app/ttsv/checkpoints/'
-
-class XlitError(enum.Enum):
-    lang_err = "Unsupported language ID requested ;( Please check available languages."
-    string_err = "String passed is incompatible ;("
- internal_err = "Internal crash ;("
- unknown_err = "Unknown Failure"
- loading_err = "Loading failed ;( Check if metadata/paths are correctly configured."
-
-
-##=================== Network ==================================================
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- input_dim,
- embed_dim,
- hidden_dim,
- rnn_type="gru",
- layers=1,
- bidirectional=False,
- dropout=0,
- device="cpu",
- ):
- super(Encoder, self).__init__()
-
- self.input_dim = input_dim # src_vocab_sz
- self.enc_embed_dim = embed_dim
- self.enc_hidden_dim = hidden_dim
- self.enc_rnn_type = rnn_type
- self.enc_layers = layers
- self.enc_directions = 2 if bidirectional else 1
- self.device = device
-
- self.embedding = nn.Embedding(self.input_dim, self.enc_embed_dim)
-
- if self.enc_rnn_type == "gru":
- self.enc_rnn = nn.GRU(
- input_size=self.enc_embed_dim,
- hidden_size=self.enc_hidden_dim,
- num_layers=self.enc_layers,
- bidirectional=bidirectional,
- )
- elif self.enc_rnn_type == "lstm":
- self.enc_rnn = nn.LSTM(
- input_size=self.enc_embed_dim,
- hidden_size=self.enc_hidden_dim,
- num_layers=self.enc_layers,
- bidirectional=bidirectional,
- )
- else:
- raise Exception("XlitError: unknown RNN type mentioned")
-
- def forward(self, x, x_sz, hidden=None):
- """
- x_sz: (batch_size, 1) - Unpadded sequence lengths used for pack_pad
- """
- batch_sz = x.shape[0]
- # x: batch_size, max_length, enc_embed_dim
- x = self.embedding(x)
-
- ## pack the padded data
- # x: max_length, batch_size, enc_embed_dim -> for pack_pad
- x = x.permute(1, 0, 2)
- x = nn.utils.rnn.pack_padded_sequence(x, x_sz, enforce_sorted=False) # unpad
-
- # output: packed_size, batch_size, enc_embed_dim
- # hidden: n_layer**num_directions, batch_size, hidden_dim | if LSTM (h_n, c_n)
- output, hidden = self.enc_rnn(
- x
- ) # gru returns hidden state of all timesteps as well as hidden state at last timestep
-
- ## pad the sequence to the max length in the batch
- # output: max_length, batch_size, enc_emb_dim*directions)
- output, _ = nn.utils.rnn.pad_packed_sequence(output)
-
- # output: batch_size, max_length, hidden_dim
- output = output.permute(1, 0, 2)
-
- return output, hidden
-
- def get_word_embedding(self, x):
-        """Return the encoder's final hidden state for a single unpadded sequence."""
- x_sz = torch.tensor([len(x)])
- x_ = torch.tensor(x).unsqueeze(0).to(dtype=torch.long)
- # x: 1, max_length, enc_embed_dim
- x = self.embedding(x_)
-
- ## pack the padded data
- # x: max_length, 1, enc_embed_dim -> for pack_pad
- x = x.permute(1, 0, 2)
- x = nn.utils.rnn.pack_padded_sequence(x, x_sz, enforce_sorted=False) # unpad
-
- # output: packed_size, 1, enc_embed_dim
- # hidden: n_layer**num_directions, 1, hidden_dim | if LSTM (h_n, c_n)
- output, hidden = self.enc_rnn(
- x
- ) # gru returns hidden state of all timesteps as well as hidden state at last timestep
-
- out_embed = hidden[0].squeeze()
-
- return out_embed
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- output_dim,
- embed_dim,
- hidden_dim,
- rnn_type="gru",
- layers=1,
- use_attention=True,
- enc_outstate_dim=None, # enc_directions * enc_hidden_dim
- dropout=0,
- device="cpu",
- ):
- super(Decoder, self).__init__()
-
- self.output_dim = output_dim # tgt_vocab_sz
- self.dec_hidden_dim = hidden_dim
- self.dec_embed_dim = embed_dim
- self.dec_rnn_type = rnn_type
- self.dec_layers = layers
- self.use_attention = use_attention
- self.device = device
- if self.use_attention:
- self.enc_outstate_dim = enc_outstate_dim if enc_outstate_dim else hidden_dim
- else:
- self.enc_outstate_dim = 0
-
- self.embedding = nn.Embedding(self.output_dim, self.dec_embed_dim)
-
- if self.dec_rnn_type == "gru":
- self.dec_rnn = nn.GRU(
- input_size=self.dec_embed_dim
- + self.enc_outstate_dim, # to concat attention_output
- hidden_size=self.dec_hidden_dim, # previous Hidden
- num_layers=self.dec_layers,
- batch_first=True,
- )
- elif self.dec_rnn_type == "lstm":
- self.dec_rnn = nn.LSTM(
- input_size=self.dec_embed_dim
- + self.enc_outstate_dim, # to concat attention_output
- hidden_size=self.dec_hidden_dim, # previous Hidden
- num_layers=self.dec_layers,
- batch_first=True,
- )
- else:
- raise Exception("XlitError: unknown RNN type mentioned")
-
- self.fc = nn.Sequential(
- nn.Linear(self.dec_hidden_dim, self.dec_embed_dim),
- nn.LeakyReLU(),
- # nn.Linear(self.dec_embed_dim, self.dec_embed_dim), nn.LeakyReLU(), # removing to reduce size
- nn.Linear(self.dec_embed_dim, self.output_dim),
- )
-
- ##----- Attention ----------
- if self.use_attention:
- self.W1 = nn.Linear(self.enc_outstate_dim, self.dec_hidden_dim)
- self.W2 = nn.Linear(self.dec_hidden_dim, self.dec_hidden_dim)
- self.V = nn.Linear(self.dec_hidden_dim, 1)
-
- def attention(self, x, hidden, enc_output):
- """
- x: (batch_size, 1, dec_embed_dim) -> after Embedding
- enc_output: batch_size, max_length, enc_hidden_dim *num_directions
- hidden: n_layers, batch_size, hidden_size | if LSTM (h_n, c_n)
- """
-
- ## perform addition to calculate the score
-
- # hidden_with_time_axis: batch_size, 1, hidden_dim
- ## hidden_with_time_axis = hidden.permute(1, 0, 2) ## replaced with below 2lines
- hidden_with_time_axis = (
- torch.sum(hidden, axis=0)
- if self.dec_rnn_type != "lstm"
- else torch.sum(hidden[0], axis=0)
- ) # h_n
-
- hidden_with_time_axis = hidden_with_time_axis.unsqueeze(1)
-
- # score: batch_size, max_length, hidden_dim
- score = torch.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))
-
- # attention_weights: batch_size, max_length, 1
- # we get 1 at the last axis because we are applying score to self.V
- attention_weights = torch.softmax(self.V(score), dim=1)
-
- # context_vector shape after sum == (batch_size, hidden_dim)
- context_vector = attention_weights * enc_output
- context_vector = torch.sum(context_vector, dim=1)
- # context_vector: batch_size, 1, hidden_dim
- context_vector = context_vector.unsqueeze(1)
-
- # attend_out (batch_size, 1, dec_embed_dim + hidden_size)
- attend_out = torch.cat((context_vector, x), -1)
-
- return attend_out, attention_weights
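-
-        # Shape sketch (illustrative): with batch_size B and max_length T,
-        #   score      = tanh(W1(enc_output) + W2(h))     # (B, T, dec_hidden_dim)
-        #   attn       = softmax(V(score), dim=1)         # (B, T, 1)
-        #   context    = sum(attn * enc_output, dim=1)    # (B, enc_outstate_dim) -> (B, 1, enc_outstate_dim)
-        #   attend_out = cat([context, x], -1)            # (B, 1, enc_outstate_dim + dec_embed_dim)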
-
- def forward(self, x, hidden, enc_output):
- """
- x: (batch_size, 1)
- enc_output: batch_size, max_length, dec_embed_dim
- hidden: n_layer, batch_size, hidden_size | lstm: (h_n, c_n)
- """
- if (hidden is None) and (self.use_attention is False):
- raise Exception(
- "XlitError: No use of a decoder with No attention and No Hidden"
- )
-
- batch_sz = x.shape[0]
-
- if hidden is None:
- # hidden: n_layers, batch_size, hidden_dim
- hid_for_att = torch.zeros(
- (self.dec_layers, batch_sz, self.dec_hidden_dim)
- ).to(self.device)
- elif self.dec_rnn_type == "lstm":
- hid_for_att = hidden[1] # c_n
-
- # x (batch_size, 1, dec_embed_dim) -> after embedding
- x = self.embedding(x)
-
- if self.use_attention:
- # x (batch_size, 1, dec_embed_dim + hidden_size) -> after attention
- # aw: (batch_size, max_length, 1)
- x, aw = self.attention(x, hidden, enc_output)
- else:
- x, aw = x, 0
-
- # passing the concatenated vector to the GRU
- # output: (batch_size, n_layers, hidden_size)
- # hidden: n_layers, batch_size, hidden_size | if LSTM (h_n, c_n)
- output, hidden = (
- self.dec_rnn(x, hidden) if hidden is not None else self.dec_rnn(x)
- )
-
- # output :shp: (batch_size * 1, hidden_size)
- output = output.view(-1, output.size(2))
-
- # output :shp: (batch_size * 1, output_dim)
- output = self.fc(output)
-
- return output, hidden, aw
-
-
-class Seq2Seq(nn.Module):
- """
- Class dependency: Encoder, Decoder
- """
-
- def __init__(
- self, encoder, decoder, pass_enc2dec_hid=False, dropout=0, device="cpu"
- ):
- super(Seq2Seq, self).__init__()
-
- self.encoder = encoder
- self.decoder = decoder
- self.device = device
- self.pass_enc2dec_hid = pass_enc2dec_hid
- _force_en2dec_hid_conv = False
-
- if self.pass_enc2dec_hid:
- assert (
- decoder.dec_hidden_dim == encoder.enc_hidden_dim
- ), "Hidden Dimension of encoder and decoder must be same, or unset `pass_enc2dec_hid`"
- if decoder.use_attention:
- assert (
- decoder.enc_outstate_dim
- == encoder.enc_directions * encoder.enc_hidden_dim
- ), "Set `enc_out_dim` correctly in decoder"
- assert (
- self.pass_enc2dec_hid or decoder.use_attention
- ), "No use of a decoder with No attention and No Hidden from Encoder"
-
- self.use_conv_4_enc2dec_hid = False
- if (
- self.pass_enc2dec_hid
- and (encoder.enc_directions * encoder.enc_layers != decoder.dec_layers)
- ) or _force_en2dec_hid_conv:
-            if encoder.enc_rnn_type == "lstm" or decoder.dec_rnn_type == "lstm":
- raise Exception(
- "XlitError: conv for enc2dec_hid not implemented; Change the layer numbers appropriately"
- )
-
- self.use_conv_4_enc2dec_hid = True
- self.enc_hid_1ax = encoder.enc_directions * encoder.enc_layers
- self.dec_hid_1ax = decoder.dec_layers
- self.e2d_hidden_conv = nn.Conv1d(self.enc_hid_1ax, self.dec_hid_1ax, 1)
-
- def enc2dec_hidden(self, enc_hidden):
- """
- enc_hidden: n_layer, batch_size, hidden_dim*num_directions
-        TODO: Implement the logic for LSTM-based models
- """
- # hidden: batch_size, enc_layer*num_directions, enc_hidden_dim
- hidden = enc_hidden.permute(1, 0, 2).contiguous()
- # hidden: batch_size, dec_layers, dec_hidden_dim -> [N,C,Tstep]
- hidden = self.e2d_hidden_conv(hidden)
-
- # hidden: dec_layers, batch_size , dec_hidden_dim
- hidden_for_dec = hidden.permute(1, 0, 2).contiguous()
-
- return hidden_for_dec
-
- def active_beam_inference(self, src, beam_width=3, max_tgt_sz=50):
- """Search based decoding
- src: (sequence_len)
- """
-
- def _avg_score(p_tup):
-            """Used for sorting beam candidates.
-            TODO: divide by sequence length raised to the power alpha (hyperparameter)
- """
- return p_tup[0]
-
- import sys
-
- batch_size = 1
- start_tok = src[0]
- end_tok = src[-1]
- src_sz = torch.tensor([len(src)])
- src_ = src.unsqueeze(0)
-
- # enc_output: (batch_size, padded_seq_length, enc_hidden_dim*num_direction)
- # enc_hidden: (enc_layers*num_direction, batch_size, hidden_dim)
- enc_output, enc_hidden = self.encoder(src_, src_sz)
-
- if self.pass_enc2dec_hid:
- # dec_hidden: dec_layers, batch_size , dec_hidden_dim
- if self.use_conv_4_enc2dec_hid:
- init_dec_hidden = self.enc2dec_hidden(enc_hidden)
- else:
- init_dec_hidden = enc_hidden
- else:
- # dec_hidden -> Will be initialized to zeros internally
- init_dec_hidden = None
-
- # top_pred[][0] = Σ-log_softmax
- # top_pred[][1] = sequence torch.tensor shape: (1)
- # top_pred[][2] = dec_hidden
- top_pred_list = [(0, start_tok.unsqueeze(0), init_dec_hidden)]
-
- for t in range(max_tgt_sz):
- cur_pred_list = []
-
- for p_tup in top_pred_list:
- if p_tup[1][-1] == end_tok:
- cur_pred_list.append(p_tup)
- continue
-
- # dec_hidden: dec_layers, 1, hidden_dim
- # dec_output: 1, output_dim
- dec_output, dec_hidden, _ = self.decoder(
- x=p_tup[1][-1].view(1, 1), # dec_input: (1,1)
- hidden=p_tup[2],
- enc_output=enc_output,
- )
-
-                ## replace Π{prob} with Σ{log(prob)} to avoid numerical underflow
- # dec_output: (1, output_dim)
- dec_output = nn.functional.log_softmax(dec_output, dim=1)
- # pred_topk.values & pred_topk.indices: (1, beam_width)
- pred_topk = torch.topk(dec_output, k=beam_width, dim=1)
-
- for i in range(beam_width):
- sig_logsmx_ = p_tup[0] + pred_topk.values[0][i]
- # seq_tensor_ : (seq_len)
- seq_tensor_ = torch.cat((p_tup[1], pred_topk.indices[0][i].view(1)))
-
- cur_pred_list.append((sig_logsmx_, seq_tensor_, dec_hidden))
-
- cur_pred_list.sort(key=_avg_score, reverse=True) # Maximized order
- top_pred_list = cur_pred_list[:beam_width]
-
- # check if end_tok of all topk
- end_flags_ = [1 if t[1][-1] == end_tok else 0 for t in top_pred_list]
- if beam_width == sum(end_flags_):
- break
-
- pred_tnsr_list = [t[1] for t in top_pred_list]
-
- return pred_tnsr_list
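-
-        # Usage sketch (illustrative): `src` is a 1-D LongTensor that already starts
-        # with the "$" token id and ends with the "#" token id, e.g.
-        #   src   = torch.from_numpy(in_glyph_obj.word2xlitvec("namaste"))
-        #   cands = model.active_beam_inference(src, beam_width=3)
-        #   words = [tgt_glyph_obj.xlitvec2word(c.cpu().numpy()) for c in cands]
-        # Candidates come back ordered by summed log-probability, best first.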
-
-
-##===================== Glyph handlers =======================================
-
-
-class GlyphStrawboss:
- def __init__(self, glyphs="en"):
-        """List of letters of a language in Unicode.
-        glyphs: either "en" (built-in lowercase Latin set) or the path to a JSON
-                file with the target script information (glyphs and number/symbol map)
-        """
- if glyphs == "en":
-            # lowercase ASCII letters a-z only
- self.glyphs = [chr(alpha) for alpha in range(97, 122 + 1)]
- else:
- self.dossier = json.load(open(glyphs, encoding="utf-8"))
- self.glyphs = self.dossier["glyphs"]
- self.numsym_map = self.dossier["numsym_map"]
-
- self.char2idx = {}
- self.idx2char = {}
- self._create_index()
-
- def _create_index(self):
-
- self.char2idx["_"] = 0 # pad
- self.char2idx["$"] = 1 # start
- self.char2idx["#"] = 2 # end
- self.char2idx["*"] = 3 # Mask
- self.char2idx["'"] = 4 # apostrophe U+0027
- self.char2idx["%"] = 5 # unused
- self.char2idx["!"] = 6 # unused
-
- # letter to index mapping
- for idx, char in enumerate(self.glyphs):
-            self.char2idx[char] = idx + 7  # offset by the 7 special tokens above
-
- # index to letter mapping
- for char, idx in self.char2idx.items():
- self.idx2char[idx] = char
-
- def size(self):
- return len(self.char2idx)
-
- def word2xlitvec(self, word):
-        """Converts a given string of glyphs (word) to a numpy vector.
-        Also adds the start and end tokens.
-        """
- try:
- vec = [self.char2idx["$"]] # start token
- for i in list(word):
- vec.append(self.char2idx[i])
- vec.append(self.char2idx["#"]) # end token
-
- vec = np.asarray(vec, dtype=np.int64)
- return vec
-
- except Exception as error:
- print("XlitError: In word:", word, "Error Char not in Token:", error)
- sys.exit()
-
- def xlitvec2word(self, vector):
- """Converts vector(numpy) to string of glyphs(word)"""
- char_list = []
- for i in vector:
- char_list.append(self.idx2char[i])
-
- word = "".join(char_list).replace("$", "").replace("#", "") # remove tokens
- word = word.replace("_", "").replace("*", "") # remove tokens
- return word
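-
-# Round-trip sketch (illustrative, built-in English glyph set only):
-#   gs = GlyphStrawboss("en")
-#   vec = gs.word2xlitvec("hello")   # [1, idx(h), idx(e), ..., 2] with $=1, #=2
-#   gs.xlitvec2word(vec)             # -> "hello" (special tokens stripped)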
-
-
-class VocabSanitizer:
- def __init__(self, data_file):
- """
-        data_file: path to file containing vocabulary list
- """
- extension = os.path.splitext(data_file)[-1]
- if extension == ".json":
- self.vocab_set = set(json.load(open(data_file, encoding="utf-8")))
- elif extension == ".csv":
- self.vocab_df = pd.read_csv(data_file).set_index("WORD")
- self.vocab_set = set(self.vocab_df.index)
- else:
-            print("XlitError: Only JSON/CSV file extensions are supported")
-
- def reposition(self, word_list):
-        """Reorder words so that in-vocabulary candidates come first."""
- new_list = []
- temp_ = word_list.copy()
- for v in word_list:
- if v in self.vocab_set:
- new_list.append(v)
- temp_.remove(v)
- new_list.extend(temp_)
-
- return new_list
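-
-    # Example (illustrative): with vocab_set == {"abc"},
-    #   reposition(["abd", "abc", "abz"]) -> ["abc", "abd", "abz"]
-    # i.e. in-vocabulary candidates move to the front; relative order is otherwise kept.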
-
-
-##=============== INSTANTIATION ================================================
-
-
-class XlitPiston:
- """
- For handling prediction & post-processing of transliteration for a single language
- Class dependency: Seq2Seq, GlyphStrawboss, VocabSanitizer
- Global Variables: F_DIR
- """
-
- def __init__(
- self,
- weight_path,
- vocab_file,
- tglyph_cfg_file,
- iglyph_cfg_file="en",
- device="cpu",
- ):
-
- self.device = device
- self.in_glyph_obj = GlyphStrawboss(iglyph_cfg_file)
- self.tgt_glyph_obj = GlyphStrawboss(glyphs=tglyph_cfg_file)
- self.voc_sanity = VocabSanitizer(vocab_file)
-
- self._numsym_set = set(
- json.load(open(tglyph_cfg_file, encoding="utf-8"))["numsym_map"].keys()
- )
- self._inchar_set = set("abcdefghijklmnopqrstuvwxyz")
- self._natscr_set = set().union(
- self.tgt_glyph_obj.glyphs, sum(self.tgt_glyph_obj.numsym_map.values(), [])
- )
-
-        ## Static model config. TODO: support defining these in the JSON config
- input_dim = self.in_glyph_obj.size()
- output_dim = self.tgt_glyph_obj.size()
- enc_emb_dim = 300
- dec_emb_dim = 300
- enc_hidden_dim = 512
- dec_hidden_dim = 512
- rnn_type = "lstm"
- enc2dec_hid = True
- attention = True
- enc_layers = 1
- dec_layers = 2
- m_dropout = 0
- enc_bidirect = True
- enc_outstate_dim = enc_hidden_dim * (2 if enc_bidirect else 1)
-
- enc = Encoder(
- input_dim=input_dim,
- embed_dim=enc_emb_dim,
- hidden_dim=enc_hidden_dim,
- rnn_type=rnn_type,
- layers=enc_layers,
- dropout=m_dropout,
- device=self.device,
- bidirectional=enc_bidirect,
- )
- dec = Decoder(
- output_dim=output_dim,
- embed_dim=dec_emb_dim,
- hidden_dim=dec_hidden_dim,
- rnn_type=rnn_type,
- layers=dec_layers,
- dropout=m_dropout,
- use_attention=attention,
- enc_outstate_dim=enc_outstate_dim,
- device=self.device,
- )
- self.model = Seq2Seq(enc, dec, pass_enc2dec_hid=enc2dec_hid, device=self.device)
- self.model = self.model.to(self.device)
- weights = torch.load(weight_path, map_location=torch.device(self.device))
-
- self.model.load_state_dict(weights)
- self.model.eval()
-
- def character_model(self, word, beam_width=1):
- in_vec = torch.from_numpy(self.in_glyph_obj.word2xlitvec(word)).to(self.device)
- ## change to active or passive beam
- p_out_list = self.model.active_beam_inference(in_vec, beam_width=beam_width)
- p_result = [
- self.tgt_glyph_obj.xlitvec2word(out.cpu().numpy()) for out in p_out_list
- ]
-
- result = self.voc_sanity.reposition(p_result)
-
- # List type
- return result
-
- def numsym_model(self, seg):
- """tgt_glyph_obj.numsym_map[x] returns a list object"""
- if len(seg) == 1:
- return [seg] + self.tgt_glyph_obj.numsym_map[seg]
-
- a = [self.tgt_glyph_obj.numsym_map[n][0] for n in seg]
- return [seg] + ["".join(a)]
-
- def _word_segementer(self, sequence):
-
- sequence = sequence.lower()
- accepted = set().union(self._numsym_set, self._inchar_set, self._natscr_set)
- # sequence = ''.join([i for i in sequence if i in accepted])
-
- segment = []
- idx = 0
- seq_ = list(sequence)
- while len(seq_):
- # for Number-Symbol
- temp = ""
- while len(seq_) and seq_[0] in self._numsym_set:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- # for Target Chars
- temp = ""
- while len(seq_) and seq_[0] in self._natscr_set:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- # for Input-Roman Chars
- temp = ""
- while len(seq_) and seq_[0] in self._inchar_set:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- temp = ""
- while len(seq_) and seq_[0] not in accepted:
- temp += seq_[0]
- seq_.pop(0)
- if temp != "":
- segment.append(temp)
-
- return segment
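-
-    # Example (illustrative, assuming ASCII digits appear in the target script's
-    # number/symbol map): _word_segementer("hello123world") -> ["hello", "123", "world"],
-    # i.e. maximal runs of number/symbol, native-script and roman characters.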
-
- def inferencer(self, sequence, beam_width=10):
-
- seg = self._word_segementer(sequence[:120])
- lit_seg = []
-
- p = 0
- while p < len(seg):
- if seg[p][0] in self._natscr_set:
- lit_seg.append([seg[p]])
- p += 1
-
- elif seg[p][0] in self._inchar_set:
- lit_seg.append(self.character_model(seg[p], beam_width=beam_width))
- p += 1
-
- elif seg[p][0] in self._numsym_set: # num & punc
- lit_seg.append(self.numsym_model(seg[p]))
- p += 1
- else:
- lit_seg.append([seg[p]])
- p += 1
-
-        ## If there are at most 2 segments, return the combinatorial product of candidates;
-        ## otherwise return only the top-1 of each segment, concatenated
- if len(lit_seg) == 1:
- final_result = lit_seg[0]
-
- elif len(lit_seg) == 2:
- final_result = [""]
- for seg in lit_seg:
- new_result = []
- for s in seg:
- for f in final_result:
- new_result.append(f + s)
- final_result = new_result
-
- else:
- new_result = []
- for seg in lit_seg:
- new_result.append(seg[0])
- final_result = ["".join(new_result)]
-
- return final_result
-
-
-from collections.abc import Iterable
-from pydload import dload
-import zipfile
-
-MODEL_DOWNLOAD_URL_PREFIX = "https://github.com/AI4Bharat/IndianNLP-Transliteration/releases/download/xlit_v0.5.0/"
-
-
-def is_folder_writable(folder):
- try:
- os.makedirs(folder, exist_ok=True)
- tmp_file = os.path.join(folder, ".write_test")
- with open(tmp_file, "w") as f:
- f.write("Permission Check")
- os.remove(tmp_file)
- return True
- except:
- return False
-
-
-def is_directory_writable(path):
- if os.name == "nt":
- return is_folder_writable(path)
- return os.access(path, os.W_OK | os.X_OK)
-
-
-class XlitEngine:
- """
- For Managing the top level tasks and applications of transliteration
- Global Variables: F_DIR
- """
-
- def __init__(
- self, lang2use="all", config_path="translit_models/default_lineup.json"
- ):
-
- lineup = json.load(open(os.path.join(F_DIR, config_path), encoding="utf-8"))
- self.lang_config = {}
- if isinstance(lang2use, str):
- if lang2use == "all":
- self.lang_config = lineup
- elif lang2use in lineup:
- self.lang_config[lang2use] = lineup[lang2use]
- else:
- raise Exception(
-                    "XlitError: The entered language code was not found. Available codes are {}".format(
- lineup.keys()
- )
- )
-
- elif isinstance(lang2use, Iterable):
- for l in lang2use:
- try:
- self.lang_config[l] = lineup[l]
- except:
- print(
- "XlitError: Language code {} not found, Skipping...".format(l)
- )
- else:
- raise Exception(
-                "XlitError: lang2use must be a list of language codes or a string with a single language code"
- )
-
- if is_directory_writable(F_DIR):
- models_path = os.path.join(F_DIR, "translit_models")
- else:
- user_home = os.path.expanduser("~")
- models_path = os.path.join(user_home, ".AI4Bharat_Xlit_Models")
- os.makedirs(models_path, exist_ok=True)
- self.download_models(models_path)
-
- self.langs = {}
- self.lang_model = {}
- for la in self.lang_config:
- try:
- print("Loading {}...".format(la))
- self.lang_model[la] = XlitPiston(
- weight_path=os.path.join(
- models_path, self.lang_config[la]["weight"]
- ),
- vocab_file=os.path.join(models_path, self.lang_config[la]["vocab"]),
- tglyph_cfg_file=os.path.join(
- models_path, self.lang_config[la]["script"]
- ),
- iglyph_cfg_file="en",
- )
- self.langs[la] = self.lang_config[la]["name"]
- except Exception as error:
- print("XlitError: Failure in loading {} \n".format(la), error)
- print(XlitError.loading_err.value)
-
- def download_models(self, models_path):
- """
-        Download models from GitHub Releases if they are not present locally
- """
- for l in self.lang_config:
- lang_name = self.lang_config[l]["eng_name"]
- lang_model_path = os.path.join(models_path, lang_name)
- if not os.path.isdir(lang_model_path):
- print("Downloading model for language: %s" % lang_name)
- remote_url = MODEL_DOWNLOAD_URL_PREFIX + lang_name + ".zip"
- downloaded_zip_path = os.path.join(models_path, lang_name + ".zip")
- dload(url=remote_url, save_to_path=downloaded_zip_path, max_time=None)
-
- if not os.path.isfile(downloaded_zip_path):
- exit(
- f"ERROR: Unable to download model from {remote_url} into {models_path}"
- )
-
- with zipfile.ZipFile(downloaded_zip_path, "r") as zip_ref:
- zip_ref.extractall(models_path)
-
- if os.path.isdir(lang_model_path):
- os.remove(downloaded_zip_path)
- else:
- exit(
- f"ERROR: Unable to find models in {lang_model_path} after download"
- )
- return
-
- def translit_word(self, eng_word, lang_code="default", topk=7, beam_width=10):
- if eng_word == "":
- return []
-
- if lang_code in self.langs:
- try:
- res_list = self.lang_model[lang_code].inferencer(
- eng_word, beam_width=beam_width
- )
- return res_list[:topk]
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- elif lang_code == "default":
- try:
- res_dict = {}
- for la in self.lang_model:
- res = self.lang_model[la].inferencer(
- eng_word, beam_width=beam_width
- )
- res_dict[la] = res[:topk]
- return res_dict
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- else:
-            print("XlitError: Unknown language requested", lang_code)
- print(XlitError.lang_err.value)
- return XlitError.lang_err
-
- def translit_sentence(self, eng_sentence, lang_code="default", beam_width=10):
- if eng_sentence == "":
- return []
-
- if lang_code in self.langs:
- try:
- out_str = ""
- for word in eng_sentence.split():
- res_ = self.lang_model[lang_code].inferencer(
- word, beam_width=beam_width
- )
- out_str = out_str + res_[0] + " "
- return out_str[:-1]
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- elif lang_code == "default":
- try:
- res_dict = {}
- for la in self.lang_model:
- out_str = ""
- for word in eng_sentence.split():
- res_ = self.lang_model[la].inferencer(
- word, beam_width=beam_width
- )
- out_str = out_str + res_[0] + " "
- res_dict[la] = out_str[:-1]
- return res_dict
-
- except Exception as error:
- print("XlitError:", traceback.format_exc())
- print(XlitError.internal_err.value)
- return XlitError.internal_err
-
- else:
-            print("XlitError: Unknown language requested", lang_code)
- print(XlitError.lang_err.value)
- return XlitError.lang_err
-
-
-if __name__ == "__main__":
-
- available_lang = [
- "bn",
- "gu",
- "hi",
- "kn",
- "gom",
- "mai",
- "ml",
- "mr",
- "pa",
- "sd",
- "si",
- "ta",
- "te",
- "ur",
- ]
-
- reg = re.compile(r"[a-zA-Z]")
- lang = "hi"
- engine = XlitEngine(
- lang
- ) # if you don't specify lang code here, this will give results in all langs available
- sent = "Hello World! ABCD क्या हाल है आपका?"
- words = [
- engine.translit_word(word, topk=1)[lang][0] if reg.match(word) else word
- for word in sent.split()
- ] # only transliterated en words, leaves rest as it is
- updated_sent = " ".join(words)
-
- print(updated_sent)
-
- # output : हेलो वर्ल्ड! क्या हाल है आपका?
-
- # y = engine.translit_sentence("Hello World !")['hi']
- # print(y)
diff --git a/spaces/Ibtehaj10/cheating-detection/pages/Login.py b/spaces/Ibtehaj10/cheating-detection/pages/Login.py
deleted file mode 100644
index cfa7fc618d97a837cdebac0c8ea09759bb435190..0000000000000000000000000000000000000000
--- a/spaces/Ibtehaj10/cheating-detection/pages/Login.py
+++ /dev/null
@@ -1,679 +0,0 @@
-import cv2
-import cv2 as cv
-import datetime
-import imutils
-import numpy as np
-from centroidtracker import CentroidTracker
-import pandas as pd
-import torch
-import streamlit as st
-import mediapipe as mp
-import tempfile
-import time
-from PIL import Image
-import base64
-import streamlit.components.v1 as components
-import csv
-import pickle
-from pathlib import Path
-import streamlit_authenticator as stauth
-import os
-from streamlit_option_menu import option_menu
-import hashlib
-import sqlite3
-import pyautogui
-
-# x-x-x-x-x-x-x-x-x-x-x-x-x-x LOGIN FORM x-x-x-x-x-x-x-x-x
-
-# print("Done !!!")
-
-data = ["student Count",'Date','Id','Mobile','Watch']
-with open('final.csv', 'w') as file:
- writer = csv.writer(file)
- writer.writerow(data)
-
-
-# # l1 = []
-# # l2 = []
-# # if st.button('signup'):
-
-
-# # usernames = st.text_input('Username')
-# # pwd = st.text_input('Password')
-# # l1.append(usernames)
-# # l2.append(pwd)
-
-# # names = ["dmin", "ser"]
-# # if st.button("signupsss"):
-# # username =l1
-
-# # password =l2
-
-# # hashed_passwords =stauth.Hasher(password).generate()
-
-# # file_path = Path(__file__).parent / "hashed_pw.pkl"
-
-# # with file_path.open("wb") as file:
-# # pickle.dump(hashed_passwords, file)
-
-
-# # elif st.button('Logins'):
-# names = ['dmin', 'ser']
-
-# username = []
-
-# file_path = Path(__file__).parent / 'hashed_pw.pkl'
-
-# with file_path.open('rb') as file:
-# hashed_passwords = pickle.load(file)
-
-# authenticator = stauth.Authenticate(names,username,hashed_passwords,'Cheating Detection','abcdefg',cookie_expiry_days=180)
-
-# name,authentication_status,username= authenticator.login('Login','main')
-
-
-# if authentication_status == False:
-# st.error('Username/Password is incorrect')
-
-# if authentication_status == None:
-# st.error('Please enter a username and password')
-
-@st.experimental_memo
-def get_img_as_base64(file):
- with open(file, "rb") as f:
- data = f.read()
- return base64.b64encode(data).decode()
-
-
-#img = get_img_as_base64("/home/anas/PersonTracking/WebUI/attendence.jpg")
-
-page_bg_img = f"""
-
-"""
-
-st.markdown(page_bg_img, unsafe_allow_html=True)
-files = pd.read_csv('LoginStatus.csv')
-
-
-idS = list(files['Id'])
-Pwd = list(files['Password'].astype(str))
-
-# print(type(Pwd))
-ids = st.sidebar.text_input('Enter a username')
-Pswd = st.sidebar.text_input('Enter a password',type="password",key="password")
-
-# print('list : ',type(Pwd))
-
-
-
-if (ids in idS) and (str(Pswd) in Pwd):
-
- # st.empty()
- date_time = time.strftime("%b %d %Y %-I:%M %p")
- date = date_time.split()
- dates = date[0:3]
- times = date[3:5]
- # x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-xAPPLICACTION -x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x
-
- def non_max_suppression_fast(boxes, overlapThresh):
- try:
- if len(boxes) == 0:
- return []
-
- if boxes.dtype.kind == "i":
- boxes = boxes.astype("float")
-
- pick = []
-
- x1 = boxes[:, 0]
- y1 = boxes[:, 1]
- x2 = boxes[:, 2]
- y2 = boxes[:, 3]
-
- area = (x2 - x1 + 1) * (y2 - y1 + 1)
- idxs = np.argsort(y2)
-
- while len(idxs) > 0:
- last = len(idxs) - 1
- i = idxs[last]
- pick.append(i)
-
- xx1 = np.maximum(x1[i], x1[idxs[:last]])
- yy1 = np.maximum(y1[i], y1[idxs[:last]])
- xx2 = np.minimum(x2[i], x2[idxs[:last]])
- yy2 = np.minimum(y2[i], y2[idxs[:last]])
-
- w = np.maximum(0, xx2 - xx1 + 1)
- h = np.maximum(0, yy2 - yy1 + 1)
-
- overlap = (w * h) / area[idxs[:last]]
-
- idxs = np.delete(idxs, np.concatenate(([last],
- np.where(overlap > overlapThresh)[0])))
-
- return boxes[pick].astype("int")
- except Exception as e:
- print("Exception occurred in non_max_suppression : {}".format(e))
-
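-    # Usage sketch (illustrative): overlapping person boxes collapse to one.
-    #   boxes = np.array([[10, 10, 110, 210], [12, 12, 112, 208], [300, 40, 380, 200]])
-    #   non_max_suppression_fast(boxes, 0.3)  # keeps one of the first two plus the third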
-
- protopath = "MobileNetSSD_deploy.prototxt"
- modelpath = "MobileNetSSD_deploy.caffemodel"
- detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
- # Only enable it if you are using OpenVino environment
- # detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
- # detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
-
-
- CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
- "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
- "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
- "sofa", "train", "tvmonitor"]
-
- tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
-
- st.markdown(
- """
-
- """,
- unsafe_allow_html=True,
- )
- hide_streamlit_style = """
-
- """
- st.markdown(hide_streamlit_style, unsafe_allow_html=True)
-
-
- # Resize Images to fit Container
- @st.cache()
- # Get Image Dimensions
- def image_resize(image, width=None, height=None, inter=cv.INTER_AREA):
- dim = None
- (h,w) = image.shape[:2]
-
- if width is None and height is None:
- return image
-
-        if width is None:
-            r = height/float(h)
-            dim = (int(w*r), height)
-
-        else:
-            r = width/float(w)
-            dim = (width, int(h*r))
-
- # Resize image
- resized = cv.resize(image,dim,interpolation=inter)
-
- return resized
-
- # About Page
- # authenticator.logout('Logout')
- EXAMPLE_NO = 3
-
-
- def streamlit_menu(example=1):
- if example == 1:
- # 1. as sidebar menu
- with st.sidebar:
- selected = option_menu(
- menu_title="Main Menu", # required
- options=["Home", "Projects", "Contact"], # required
- icons=["house", "book", "envelope"], # optional
- menu_icon="cast", # optional
- default_index=0, # optional
- )
- return selected
-
- if example == 2:
- # 2. horizontal menu w/o custom style
- selected = option_menu(
- menu_title=None, # required
- options=["Home", "Projects", "Contact"], # required
- icons=["house", "book", "envelope"], # optional
- menu_icon="cast", # optional
- default_index=0, # optional
- orientation="horizontal",
- )
- return selected
-
- if example == 3:
- # 2. horizontal menu with custom style
- selected = option_menu(
- menu_title=None, # required
- options=["Home", "Projects", "Contact"], # required
- icons=["house", "book", "envelope"], # optional
- menu_icon="cast", # optional
- default_index=0, # optional
- orientation="horizontal",
- styles={
- "container": {"padding": "0!important", "background-color": "#eaeaea"},
- "icon": {"color": "#080602", "font-size": "18px"},
- "nav-link": {
- "font-size": "18px",
- "text-align": "left",
- "color": "#000000",
- "margin": "0px",
- "--hover-color": "#E1A031",
- },
- "nav-link-selected": {"background-color": "#ffffff"},
- },
- )
- return selected
-
-
- selected = streamlit_menu(example=EXAMPLE_NO)
-
- if selected == "Home":
- st.title(f"You have selected {selected}")
- # if selected == "Projects":
- # st.title(f"You have selected {selected}")
- if selected == "Contact":
- st.title(f"You have selected {selected}")
- # app_mode = st.sidebar.selectbox(
- # 'App Mode',
- # ['Application']
- # )
- if selected == 'Projects':
- # 2. horizontal menu with custom style
- # selected = option_menu(
- # menu_title=None, # required
- # options=["Home", "Projects", "Contact"], # required
- # icons=["house", "book", "envelope"], # optional
- # menu_icon="cast", # optional
- # default_index=0, # optional
- # orientation="horizontal",
- # styles={
- # "container": {"padding": "0!important", "background-color": "#fafafa"},
- # "icon": {"color": "orange", "font-size": "25px"},
- # "nav-link": {
- # "font-size": "25px",
- # "text-align": "left",
- # "margin": "0px",
- # "--hover-color": "#eee",
- # },
- # "nav-link-selected": {"background-color": "blue"},
- # },
- # )
- # if app_mode == 'About':
- # st.title('About Product And Team')
- # st.markdown('''
- # Imran Bhai Project
- # ''')
- # st.markdown(
- # """
- #
- # """,
- # unsafe_allow_html=True,
- # )
-
-
-
-
- # elif app_mode == 'Application':
-
- st.set_option('deprecation.showfileUploaderEncoding', False)
-
- use_webcam = "pass"
- # record = st.sidebar.checkbox("Record Video")
-
- # if record:
- # st.checkbox('Recording', True)
-
- # drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
-
- # st.sidebar.markdown('---')
-
- # ## Add Sidebar and Window style
- # st.markdown(
- # """
- #
- # """,
- # unsafe_allow_html=True,
- # )
-
- # max_faces = st.sidebar.number_input('Maximum Number of Faces', value=5, min_value=1)
- # st.sidebar.markdown('---')
- # detection_confidence = st.sidebar.slider('Min Detection Confidence', min_value=0.0,max_value=1.0,value=0.5)
- # tracking_confidence = st.sidebar.slider('Min Tracking Confidence', min_value=0.0,max_value=1.0,value=0.5)
- # st.sidebar.markdown('---')
-
- ## Get Video
- stframe = st.empty()
- video_file_buffer = st.file_uploader("Upload a Video", type=['mp4', 'mov', 'avi', 'asf', 'm4v'])
- temp_file = tempfile.NamedTemporaryFile(delete=False)
-
-
- if not video_file_buffer:
- if use_webcam:
- video = cv.VideoCapture(0)
- else:
- try:
- video = cv.VideoCapture(1)
- temp_file.name = video
- except:
- pass
- else:
- temp_file.write(video_file_buffer.read())
- video = cv.VideoCapture(temp_file.name)
-
- width = int(video.get(cv.CAP_PROP_FRAME_WIDTH))
- height = int(video.get(cv.CAP_PROP_FRAME_HEIGHT))
- fps_input = int(video.get(cv.CAP_PROP_FPS))
-
- ## Recording
- codec = cv.VideoWriter_fourcc('a','v','c','1')
- out = cv.VideoWriter('output1.mp4', codec, fps_input, (width,height))
-
- # st.sidebar.text('Input Video')
- # st.sidebar.video(temp_file.name)
-
- fps = 0
- i = 0
-
- drawing_spec = mp.solutions.drawing_utils.DrawingSpec(thickness=2, circle_radius=1)
-
- kpil, kpil2, kpil3,kpil4,kpil5, kpil6 = st.columns(6)
-
- with kpil:
- st.markdown('**Frame Rate**')
- kpil_text = st.markdown('0')
-
- with kpil2:
- st.markdown('**detection ID**')
- kpil2_text = st.markdown('0')
-
- with kpil3:
- st.markdown('**Mobile**')
- kpil3_text = st.markdown('0')
- with kpil4:
- st.markdown('**Watch**')
- kpil4_text = st.markdown('0')
- with kpil5:
- st.markdown('**Count**')
- kpil5_text = st.markdown('0')
- with kpil6:
- st.markdown('**Img Res**')
- kpil6_text = st.markdown('0')
-
-
-
- st.markdown('', unsafe_allow_html=True)
- # try:
- def main():
- db = {}
-
- # cap = cv2.VideoCapture('//home//anas//PersonTracking//WebUI//movement.mp4')
- path='/usr/local/lib/python3.10/dist-packages/yolo0vs5/yolov5s-int8.tflite'
- #count=0
- custom = 'yolov5s'
-
- model = torch.hub.load('/usr/local/lib/python3.10/dist-packages/yolovs5', custom, path,source='local',force_reload=True)
-
- b=model.names[0] = 'person'
- mobile = model.names[67] = 'cell phone'
- watch = model.names[75] = 'clock'
-
- fps_start_time = datetime.datetime.now()
- fps = 0
- size=416
-
- count=0
- counter=0
-
-
- color=(0,0,255)
-
- cy1=250
- offset=6
-
-
- pt1 = (120, 100)
- pt2 = (980, 1150)
- color = (0, 255, 0)
-
- pt3 = (283, 103)
- pt4 = (1500, 1150)
-
- cy2 = 500
- color = (0, 255, 0)
- total_frames = 0
- prevTime = 0
- cur_frame = 0
- count=0
- counter=0
- fps_start_time = datetime.datetime.now()
- fps = 0
- total_frames = 0
- lpc_count = 0
- opc_count = 0
- object_id_list = []
- # success = True
- if st.button("Detect"):
- try:
- while video.isOpened():
-
- ret, frame = video.read()
- frame = imutils.resize(frame, width=600)
- total_frames = total_frames + 1
-
- (H, W) = frame.shape[:2]
-
- blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
-
- detector.setInput(blob)
- person_detections = detector.forward()
- rects = []
- for i in np.arange(0, person_detections.shape[2]):
- confidence = person_detections[0, 0, i, 2]
- if confidence > 0.5:
- idx = int(person_detections[0, 0, i, 1])
-
- if CLASSES[idx] != "person":
- continue
-
- person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
- (startX, startY, endX, endY) = person_box.astype("int")
- rects.append(person_box)
-
- boundingboxes = np.array(rects)
- boundingboxes = boundingboxes.astype(int)
- rects = non_max_suppression_fast(boundingboxes, 0.3)
-
- objects = tracker.update(rects)
- for (objectId, bbox) in objects.items():
- x1, y1, x2, y2 = bbox
- x1 = int(x1)
- y1 = int(y1)
- x2 = int(x2)
- y2 = int(y2)
-
- cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
- text = "ID: {}".format(objectId)
- # print(text)
- cv2.putText(frame, text, (x1, y1-5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
- if objectId not in object_id_list:
- object_id_list.append(objectId)
- fps_end_time = datetime.datetime.now()
- time_diff = fps_end_time - fps_start_time
- if time_diff.seconds == 0:
- fps = 0.0
- else:
- fps = (total_frames / time_diff.seconds)
-
- fps_text = "FPS: {:.2f}".format(fps)
-
- cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
- lpc_count = len(objects)
- opc_count = len(object_id_list)
-
- lpc_txt = "LPC: {}".format(lpc_count)
- opc_txt = "OPC: {}".format(opc_count)
-
- count += 1
- if count % 4 != 0:
- continue
- # frame=cv.resize(frame, (600,500))
- # cv2.line(frame, pt1, pt2,color,2)
- # cv2.line(frame, pt3, pt4,color,2)
- results = model(frame,size)
- components = results.pandas().xyxy[0]
- for index, row in results.pandas().xyxy[0].iterrows():
- x1 = int(row['xmin'])
- y1 = int(row['ymin'])
- x2 = int(row['xmax'])
- y2 = int(row['ymax'])
- confidence = (row['confidence'])
- obj = (row['class'])
-
-
- # min':x1,'ymin':y1,'xmax':x2,'ymax':y2,'confidence':confidence,'Object':obj}
- # if lpc_txt is not None:
- # try:
- # db["student Count"] = [lpc_txt]
- # except:
- # db["student Count"] = ['N/A']
- if obj == 0:
- cv2.rectangle(frame,(x1,y1),(x2,y2),(0,0,255),2)
- rectx1,recty1 = ((x1+x2)/2,(y1+y2)/2)
- rectcenter = int(rectx1),int(recty1)
- cx = rectcenter[0]
- cy = rectcenter[1]
- cv2.circle(frame,(cx,cy),3,(0,255,0),-1)
- cv2.putText(frame,str(b), (x1,y1), cv2.FONT_HERSHEY_PLAIN,2,(255,255,255),2)
-
- db["student Count"] = [lpc_txt]
- db['Date'] = [date_time]
- db['id'] = ['N/A']
- db['Mobile']=['N/A']
- db['Watch'] = ['N/A']
- if cy<(cy1+offset) and cy>(cy1-offset):
- DB = []
- counter+=1
- DB.append(counter)
-
- ff = DB[-1]
- fx = str(ff)
- # cv2.line(frame, pt1, pt2,(0, 0, 255),2)
- # if cy<(cy2+offset) and cy>(cy2-offset):
-
- # cv2.line(frame, pt3, pt4,(0, 0, 255),2)
- font = cv2.FONT_HERSHEY_TRIPLEX
- cv2.putText(frame,fx,(50, 50),font, 1,(0, 0, 255),2,cv2.LINE_4)
- cv2.putText(frame,"Movement",(70, 70),font, 1,(0, 0, 255),2,cv2.LINE_4)
-                                # NOTE: the original inline HTML for this metric was stripped; plain text assumed
-                                kpil2_text.write(f"{text}", unsafe_allow_html=True)
-
-
- frame = cv.resize(frame,(0,0), fx=0.8, fy=0.8)
- frame = image_resize(image=frame, width=640)
- stframe.image(frame,channels='BGR', use_column_width=True)
- df = pd.DataFrame(db)
- df.to_csv('final.csv',mode='a',header=False,index=False)
- except:
- pass
- with open('final.csv') as f:
- st.download_button(label = 'Download Cheating Report',data=f,file_name='data.csv')
-
- os.remove("final.csv")
- main()
diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/models/sr_model.py b/spaces/Iceclear/StableSR/StableSR/basicsr/models/sr_model.py
deleted file mode 100644
index 787f1fd2eab5963579c764c1bfb87199b7dd196f..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/basicsr/models/sr_model.py
+++ /dev/null
@@ -1,279 +0,0 @@
-import torch
-from collections import OrderedDict
-from os import path as osp
-from tqdm import tqdm
-
-from basicsr.archs import build_network
-from basicsr.losses import build_loss
-from basicsr.metrics import calculate_metric
-from basicsr.utils import get_root_logger, imwrite, tensor2img
-from basicsr.utils.registry import MODEL_REGISTRY
-from .base_model import BaseModel
-
-
-@MODEL_REGISTRY.register()
-class SRModel(BaseModel):
- """Base SR model for single image super-resolution."""
-
- def __init__(self, opt):
- super(SRModel, self).__init__(opt)
-
- # define network
- self.net_g = build_network(opt['network_g'])
- self.net_g = self.model_to_device(self.net_g)
- self.print_network(self.net_g)
-
- # load pretrained models
- load_path = self.opt['path'].get('pretrain_network_g', None)
- if load_path is not None:
- param_key = self.opt['path'].get('param_key_g', 'params')
- self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key)
-
- if self.is_train:
- self.init_training_settings()
-
- def init_training_settings(self):
- self.net_g.train()
- train_opt = self.opt['train']
-
- self.ema_decay = train_opt.get('ema_decay', 0)
- if self.ema_decay > 0:
- logger = get_root_logger()
- logger.info(f'Use Exponential Moving Average with decay: {self.ema_decay}')
- # define network net_g with Exponential Moving Average (EMA)
- # net_g_ema is used only for testing on one GPU and saving
- # There is no need to wrap with DistributedDataParallel
- self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
- # load pretrained model
- load_path = self.opt['path'].get('pretrain_network_g', None)
- if load_path is not None:
- self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
- else:
- self.model_ema(0) # copy net_g weight
- self.net_g_ema.eval()
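-            # EMA sketch (illustrative): BaseModel.model_ema is assumed to blend
-            #   p_ema = decay * p_ema + (1 - decay) * p
-            # for every parameter, so net_g_ema tracks a smoothed copy of net_g that
-            # is only used for testing and saving.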
-
- # define losses
- if train_opt.get('pixel_opt'):
- self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
- else:
- self.cri_pix = None
-
- if train_opt.get('perceptual_opt'):
- self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
- else:
- self.cri_perceptual = None
-
- if self.cri_pix is None and self.cri_perceptual is None:
- raise ValueError('Both pixel and perceptual losses are None.')
-
- # set up optimizers and schedulers
- self.setup_optimizers()
- self.setup_schedulers()
-
- def setup_optimizers(self):
- train_opt = self.opt['train']
- optim_params = []
- for k, v in self.net_g.named_parameters():
- if v.requires_grad:
- optim_params.append(v)
- else:
- logger = get_root_logger()
- logger.warning(f'Params {k} will not be optimized.')
-
- optim_type = train_opt['optim_g'].pop('type')
- self.optimizer_g = self.get_optimizer(optim_type, optim_params, **train_opt['optim_g'])
- self.optimizers.append(self.optimizer_g)
-
- def feed_data(self, data):
- self.lq = data['lq'].to(self.device)
- if 'gt' in data:
- self.gt = data['gt'].to(self.device)
-
- def optimize_parameters(self, current_iter):
- self.optimizer_g.zero_grad()
- self.output = self.net_g(self.lq)
-
- l_total = 0
- loss_dict = OrderedDict()
- # pixel loss
- if self.cri_pix:
- l_pix = self.cri_pix(self.output, self.gt)
- l_total += l_pix
- loss_dict['l_pix'] = l_pix
- # perceptual loss
- if self.cri_perceptual:
- l_percep, l_style = self.cri_perceptual(self.output, self.gt)
- if l_percep is not None:
- l_total += l_percep
- loss_dict['l_percep'] = l_percep
- if l_style is not None:
- l_total += l_style
- loss_dict['l_style'] = l_style
-
- l_total.backward()
- self.optimizer_g.step()
-
- self.log_dict = self.reduce_loss_dict(loss_dict)
-
- if self.ema_decay > 0:
- self.model_ema(decay=self.ema_decay)
-
- def test(self):
- if hasattr(self, 'net_g_ema'):
- self.net_g_ema.eval()
- with torch.no_grad():
- self.output = self.net_g_ema(self.lq)
- else:
- self.net_g.eval()
- with torch.no_grad():
- self.output = self.net_g(self.lq)
- self.net_g.train()
-
- def test_selfensemble(self):
- # TODO: to be tested
- # 8 augmentations
- # modified from https://github.com/thstkdgus35/EDSR-PyTorch
-
- def _transform(v, op):
- # if self.precision != 'single': v = v.float()
- v2np = v.data.cpu().numpy()
- if op == 'v':
- tfnp = v2np[:, :, :, ::-1].copy()
- elif op == 'h':
- tfnp = v2np[:, :, ::-1, :].copy()
- elif op == 't':
- tfnp = v2np.transpose((0, 1, 3, 2)).copy()
-
- ret = torch.Tensor(tfnp).to(self.device)
- # if self.precision == 'half': ret = ret.half()
-
- return ret
-
- # prepare augmented data
- lq_list = [self.lq]
- for tf in 'v', 'h', 't':
- lq_list.extend([_transform(t, tf) for t in lq_list])
-
- # inference
- if hasattr(self, 'net_g_ema'):
- self.net_g_ema.eval()
- with torch.no_grad():
- out_list = [self.net_g_ema(aug) for aug in lq_list]
- else:
- self.net_g.eval()
- with torch.no_grad():
-                out_list = [self.net_g(aug) for aug in lq_list]
- self.net_g.train()
-
- # merge results
- for i in range(len(out_list)):
- if i > 3:
- out_list[i] = _transform(out_list[i], 't')
- if i % 4 > 1:
- out_list[i] = _transform(out_list[i], 'h')
- if (i % 4) % 2 == 1:
- out_list[i] = _transform(out_list[i], 'v')
- output = torch.cat(out_list, dim=0)
-
- self.output = output.mean(dim=0, keepdim=True)
-
- def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
- if self.opt['rank'] == 0:
- self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
-
- def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
- dataset_name = dataloader.dataset.opt['name']
- with_metrics = self.opt['val'].get('metrics') is not None
- use_pbar = self.opt['val'].get('pbar', False)
-
- if with_metrics:
- if not hasattr(self, 'metric_results'): # only execute in the first run
- self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
- # initialize the best metric results for each dataset_name (supporting multiple validation datasets)
- self._initialize_best_metric_results(dataset_name)
- # zero self.metric_results
- if with_metrics:
- self.metric_results = {metric: 0 for metric in self.metric_results}
-
- metric_data = dict()
- if use_pbar:
- pbar = tqdm(total=len(dataloader), unit='image')
-
- for idx, val_data in enumerate(dataloader):
- img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
- self.feed_data(val_data)
- self.test()
-
- visuals = self.get_current_visuals()
- sr_img = tensor2img([visuals['result']])
- metric_data['img'] = sr_img
- if 'gt' in visuals:
- gt_img = tensor2img([visuals['gt']])
- metric_data['img2'] = gt_img
- del self.gt
-
- # tentative for out of GPU memory
- del self.lq
- del self.output
- torch.cuda.empty_cache()
-
- if save_img:
- if self.opt['is_train']:
- save_img_path = osp.join(self.opt['path']['visualization'], img_name,
- f'{img_name}_{current_iter}.png')
- else:
- if self.opt['val']['suffix']:
- save_img_path = osp.join(self.opt['path']['visualization'], dataset_name,
- f'{img_name}_{self.opt["val"]["suffix"]}.png')
- else:
- save_img_path = osp.join(self.opt['path']['visualization'], dataset_name,
- f'{img_name}_{self.opt["name"]}.png')
- imwrite(sr_img, save_img_path)
-
- if with_metrics:
- # calculate metrics
- for name, opt_ in self.opt['val']['metrics'].items():
- self.metric_results[name] += calculate_metric(metric_data, opt_)
- if use_pbar:
- pbar.update(1)
- pbar.set_description(f'Test {img_name}')
- if use_pbar:
- pbar.close()
-
- if with_metrics:
- for metric in self.metric_results.keys():
- self.metric_results[metric] /= (idx + 1)
- # update the best metric result
- self._update_best_metric_result(dataset_name, metric, self.metric_results[metric], current_iter)
-
- self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
-
- def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
- log_str = f'Validation {dataset_name}\n'
- for metric, value in self.metric_results.items():
- log_str += f'\t # {metric}: {value:.4f}'
- if hasattr(self, 'best_metric_results'):
- log_str += (f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
- f'{self.best_metric_results[dataset_name][metric]["iter"]} iter')
- log_str += '\n'
-
- logger = get_root_logger()
- logger.info(log_str)
- if tb_logger:
- for metric, value in self.metric_results.items():
- tb_logger.add_scalar(f'metrics/{dataset_name}/{metric}', value, current_iter)
-
- def get_current_visuals(self):
- out_dict = OrderedDict()
- out_dict['lq'] = self.lq.detach().cpu()
- out_dict['result'] = self.output.detach().cpu()
- if hasattr(self, 'gt'):
- out_dict['gt'] = self.gt.detach().cpu()
- return out_dict
-
- def save(self, epoch, current_iter):
- if hasattr(self, 'net_g_ema'):
- self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
- else:
- self.save_network(self.net_g, 'net_g', current_iter)
- self.save_training_state(epoch, current_iter)
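For reference, here is a minimal standalone sketch of the x8 geometric self-ensemble that `test_selfensemble` implements above, with an identity function standing in for `net_g` so the augment/undo round trip can be checked numerically (the dummy model, tensor shape, and final assert are illustrative assumptions, not part of the deleted file):

```python
import torch

def _transform(v, op):
    # flip width ('v'), flip height ('h'), or transpose H/W ('t'), mirroring the method above
    if op == 'v':
        return torch.flip(v, dims=[3])
    if op == 'h':
        return torch.flip(v, dims=[2])
    return v.transpose(2, 3)  # 't'

lq = torch.rand(1, 3, 8, 8)          # dummy low-quality input
model = lambda x: x                  # stand-in for net_g / net_g_ema

# build the 8 augmented inputs exactly as test_selfensemble does
lq_list = [lq]
for tf in 'v', 'h', 't':
    lq_list.extend([_transform(t, tf) for t in lq_list])

out_list = [model(aug) for aug in lq_list]

# undo the augmentations in reverse order, then average
for i in range(len(out_list)):
    if i > 3:
        out_list[i] = _transform(out_list[i], 't')
    if i % 4 > 1:
        out_list[i] = _transform(out_list[i], 'h')
    if (i % 4) % 2 == 1:
        out_list[i] = _transform(out_list[i], 'v')

output = torch.cat(out_list, dim=0).mean(dim=0, keepdim=True)
assert torch.allclose(output, lq)    # with an identity model the ensemble is a no-op
```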
diff --git a/spaces/InpaintAI/Inpaint-Anything/utils/visual_mask_on_img.py b/spaces/InpaintAI/Inpaint-Anything/utils/visual_mask_on_img.py
deleted file mode 100644
index 2897ee90d588d66bcf8382bcbe74b191eda91b6d..0000000000000000000000000000000000000000
--- a/spaces/InpaintAI/Inpaint-Anything/utils/visual_mask_on_img.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import cv2
-import sys
-import argparse
-import numpy as np
-from PIL import Image
-from pathlib import Path
-from matplotlib import pyplot as plt
-from typing import Any, Dict, List
-import glob
-
-from utils import load_img_to_array, show_mask
-
-
-def setup_args(parser):
- parser.add_argument(
- "--input_img", type=str, required=True,
- help="Path to a single input img",
- )
- parser.add_argument(
- "--input_mask_glob", type=str, required=True,
- help="Glob to input masks",
- )
- parser.add_argument(
- "--output_dir", type=str, required=True,
- help="Output path to the directory with results.",
- )
-
-if __name__ == "__main__":
- """Example usage:
- python visual_mask_on_img.py \
- --input_img FA_demo/FA1_dog.png \
- --input_mask_glob "results/FA1_dog/mask*.png" \
- --output_dir results
- """
- parser = argparse.ArgumentParser()
- setup_args(parser)
- args = parser.parse_args(sys.argv[1:])
-
- img = load_img_to_array(args.input_img)
- img_stem = Path(args.input_img).stem
-
- mask_ps = sorted(glob.glob(args.input_mask_glob))
-
- out_dir = Path(args.output_dir) / img_stem
- out_dir.mkdir(parents=True, exist_ok=True)
-
- for mask_p in mask_ps:
- mask = load_img_to_array(mask_p)
- mask = mask.astype(np.uint8)
-
- # path to the results
- img_mask_p = out_dir / f"with_{Path(mask_p).name}"
-
- # save the masked image
- dpi = plt.rcParams['figure.dpi']
- height, width = img.shape[:2]
- plt.figure(figsize=(width/dpi/0.77, height/dpi/0.77))
- plt.imshow(img)
- plt.axis('off')
- show_mask(plt.gca(), mask, random_color=False)
- plt.savefig(img_mask_p, bbox_inches='tight', pad_inches=0)
- plt.close()
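A standalone sketch of the figure-sizing trick the loop above relies on: matplotlib sizes figures in inches, so `width / dpi` inches renders the array at roughly its native pixel size, and the 0.77 divisor compensates for the margins trimmed by `bbox_inches='tight'`. The random image and output filename below are placeholders, not values from the script.

```python
import numpy as np
import matplotlib
matplotlib.use("Agg")                      # render off-screen
from matplotlib import pyplot as plt

img = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)   # stand-in for a loaded image
dpi = plt.rcParams['figure.dpi']
height, width = img.shape[:2]

plt.figure(figsize=(width / dpi / 0.77, height / dpi / 0.77))
plt.imshow(img)
plt.axis('off')
plt.savefig("with_mask_demo.png", bbox_inches='tight', pad_inches=0)
plt.close()
```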
diff --git a/spaces/JUNGU/VToonify/vtoonify/model/vtoonify.py b/spaces/JUNGU/VToonify/vtoonify/model/vtoonify.py
deleted file mode 100644
index 6556a0a6c734be5f413f4683eb63c44f449c6af8..0000000000000000000000000000000000000000
--- a/spaces/JUNGU/VToonify/vtoonify/model/vtoonify.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import torch
-import numpy as np
-import math
-from torch import nn
-from model.stylegan.model import ConvLayer, EqualLinear, Generator, ResBlock
-from model.dualstylegan import AdaptiveInstanceNorm, AdaResBlock, DualStyleGAN
-import torch.nn.functional as F
-
-# IC-GAN: stylegan discriminator
-class ConditionalDiscriminator(nn.Module):
- def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], use_condition=False, style_num=None):
- super().__init__()
-
- channels = {
- 4: 512,
- 8: 512,
- 16: 512,
- 32: 512,
- 64: 256 * channel_multiplier,
- 128: 128 * channel_multiplier,
- 256: 64 * channel_multiplier,
- 512: 32 * channel_multiplier,
- 1024: 16 * channel_multiplier,
- }
-
- convs = [ConvLayer(3, channels[size], 1)]
-
- log_size = int(math.log(size, 2))
-
- in_channel = channels[size]
-
- for i in range(log_size, 2, -1):
- out_channel = channels[2 ** (i - 1)]
-
- convs.append(ResBlock(in_channel, out_channel, blur_kernel))
-
- in_channel = out_channel
-
- self.convs = nn.Sequential(*convs)
-
- self.stddev_group = 4
- self.stddev_feat = 1
- self.use_condition = use_condition
-
- if self.use_condition:
- self.condition_dim = 128
- # map style degree to 64-dimensional vector
- self.label_mapper = nn.Sequential(
- nn.Linear(1, 64),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Linear(64, 64),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Linear(64, self.condition_dim//2),
- )
- # map style code index to 64-dimensional vector
- self.style_mapper = nn.Embedding(style_num, self.condition_dim-self.condition_dim//2)
- else:
- self.condition_dim = 1
-
- self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
- self.final_linear = nn.Sequential(
- EqualLinear(channels[4] * 4 * 4, channels[4], activation="fused_lrelu"),
- EqualLinear(channels[4], self.condition_dim),
- )
-
- def forward(self, input, degree_label=None, style_ind=None):
- out = self.convs(input)
-
- batch, channel, height, width = out.shape
- group = min(batch, self.stddev_group)
- stddev = out.view(
- group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
- )
- stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
- stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
- stddev = stddev.repeat(group, 1, height, width)
- out = torch.cat([out, stddev], 1)
-
- out = self.final_conv(out)
- out = out.view(batch, -1)
-
- if self.use_condition:
- h = self.final_linear(out)
- condition = torch.cat((self.label_mapper(degree_label), self.style_mapper(style_ind)), dim=1)
- out = (h * condition).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.condition_dim))
- else:
- out = self.final_linear(out)
-
- return out
-
-
-class VToonifyResBlock(nn.Module):
- def __init__(self, fin):
- super().__init__()
-
- self.conv = nn.Conv2d(fin, fin, 3, 1, 1)
- self.conv2 = nn.Conv2d(fin, fin, 3, 1, 1)
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-
- def forward(self, x):
- out = self.lrelu(self.conv(x))
- out = self.lrelu(self.conv2(out))
- out = (out + x) / math.sqrt(2)
- return out
-
-class Fusion(nn.Module):
- def __init__(self, in_channels, skip_channels, out_channels):
- super().__init__()
-
- # create conv layers
- self.conv = nn.Conv2d(in_channels + skip_channels, out_channels, 3, 1, 1, bias=True)
- self.norm = AdaptiveInstanceNorm(in_channels + skip_channels, 128)
- self.conv2 = nn.Conv2d(in_channels + skip_channels, 1, 3, 1, 1, bias=True)
- #'''
- self.linear = nn.Sequential(
- nn.Linear(1, 64),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Linear(64, 128),
- nn.LeakyReLU(negative_slope=0.2, inplace=True)
- )
-
- def forward(self, f_G, f_E, d_s=1):
- # label of style degree
- label = self.linear(torch.zeros(f_G.size(0),1).to(f_G.device) + d_s)
- out = torch.cat([f_G, abs(f_G-f_E)], dim=1)
- m_E = (F.relu(self.conv2(self.norm(out, label)))).tanh()
- f_out = self.conv(torch.cat([f_G, f_E * m_E], dim=1))
- return f_out, m_E
-
-class VToonify(nn.Module):
- def __init__(self,
- in_size=256,
- out_size=1024,
- img_channels=3,
- style_channels=512,
- num_mlps=8,
- channel_multiplier=2,
- num_res_layers=6,
- backbone = 'dualstylegan',
- ):
-
- super().__init__()
-
- self.backbone = backbone
- if self.backbone == 'dualstylegan':
- # DualStyleGAN, with weights being fixed
- self.generator = DualStyleGAN(out_size, style_channels, num_mlps, channel_multiplier)
- else:
- # StyleGANv2, with weights being fixed
- self.generator = Generator(out_size, style_channels, num_mlps, channel_multiplier)
-
- self.in_size = in_size
- self.style_channels = style_channels
- channels = self.generator.channels
-
- # encoder
- num_styles = int(np.log2(out_size)) * 2 - 2
- encoder_res = [2**i for i in range(int(np.log2(in_size)), 4, -1)]
- self.encoder = nn.ModuleList()
- self.encoder.append(
- nn.Sequential(
- nn.Conv2d(img_channels+19, 32, 3, 1, 1, bias=True),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(32, channels[in_size], 3, 1, 1, bias=True),
- nn.LeakyReLU(negative_slope=0.2, inplace=True)))
-
- for res in encoder_res:
- in_channels = channels[res]
- if res > 32:
- out_channels = channels[res // 2]
- block = nn.Sequential(
- nn.Conv2d(in_channels, out_channels, 3, 2, 1, bias=True),
- nn.LeakyReLU(negative_slope=0.2, inplace=True),
- nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=True),
- nn.LeakyReLU(negative_slope=0.2, inplace=True))
- self.encoder.append(block)
- else:
- layers = []
- for _ in range(num_res_layers):
- layers.append(VToonifyResBlock(in_channels))
- self.encoder.append(nn.Sequential(*layers))
- block = nn.Conv2d(in_channels, img_channels, 1, 1, 0, bias=True)
- self.encoder.append(block)
-
- # trainable fusion module
- self.fusion_out = nn.ModuleList()
- self.fusion_skip = nn.ModuleList()
- for res in encoder_res[::-1]:
- num_channels = channels[res]
- if self.backbone == 'dualstylegan':
- self.fusion_out.append(
- Fusion(num_channels, num_channels, num_channels))
- else:
- self.fusion_out.append(
- nn.Conv2d(num_channels * 2, num_channels, 3, 1, 1, bias=True))
-
- self.fusion_skip.append(
- nn.Conv2d(num_channels + 3, 3, 3, 1, 1, bias=True))
-
- # Modified ModRes blocks in DualStyleGAN, with weights being fixed
- if self.backbone == 'dualstylegan':
- self.res = nn.ModuleList()
- self.res.append(AdaResBlock(self.generator.channels[2 ** 2])) # for conv1, no use in this model
- for i in range(3, 6):
- out_channel = self.generator.channels[2 ** i]
- self.res.append(AdaResBlock(out_channel, dilation=2**(5-i)))
- self.res.append(AdaResBlock(out_channel, dilation=2**(5-i)))
-
-
- def forward(self, x, style, d_s=None, return_mask=False, return_feat=False):
- # map style to W+ space
- if style is not None and style.ndim < 3:
- if self.backbone == 'dualstylegan':
- resstyles = self.generator.style(style).unsqueeze(1).repeat(1, self.generator.n_latent, 1)
- adastyles = style.unsqueeze(1).repeat(1, self.generator.n_latent, 1)
- elif style is not None:
- nB, nL, nD = style.shape
- if self.backbone == 'dualstylegan':
- resstyles = self.generator.style(style.reshape(nB*nL, nD)).reshape(nB, nL, nD)
- adastyles = style
- if self.backbone == 'dualstylegan':
- adastyles = adastyles.clone()
- for i in range(7, self.generator.n_latent):
- adastyles[:, i] = self.generator.res[i](adastyles[:, i])
-
- # obtain multi-scale content features
- feat = x
- encoder_features = []
- # downsampling conv parts of E
- for block in self.encoder[:-2]:
- feat = block(feat)
- encoder_features.append(feat)
- encoder_features = encoder_features[::-1]
- # Resblocks in E
- for ii, block in enumerate(self.encoder[-2]):
- feat = block(feat)
- # adjust Resblocks with ModRes blocks
- if self.backbone == 'dualstylegan':
- feat = self.res[ii+1](feat, resstyles[:, ii+1], d_s)
- # the last-layer feature of E (inputs of backbone)
- out = feat
- skip = self.encoder[-1](feat)
- if return_feat:
- return out, skip
-
- # 32x32 ---> higher res
- _index = 1
- m_Es = []
- for conv1, conv2, to_rgb in zip(
- self.stylegan().convs[6::2], self.stylegan().convs[7::2], self.stylegan().to_rgbs[3:]):
-
- # pass the mid-layer features of E to the corresponding resolution layers of G
- if 2 ** (5+((_index-1)//2)) <= self.in_size:
- fusion_index = (_index - 1) // 2
- f_E = encoder_features[fusion_index]
-
- if self.backbone == 'dualstylegan':
- out, m_E = self.fusion_out[fusion_index](out, f_E, d_s)
- skip = self.fusion_skip[fusion_index](torch.cat([skip, f_E*m_E], dim=1))
- m_Es += [m_E]
- else:
- out = self.fusion_out[fusion_index](torch.cat([out, f_E], dim=1))
- skip = self.fusion_skip[fusion_index](torch.cat([skip, f_E], dim=1))
-
- # remove the noise input
- batch, _, height, width = out.shape
- noise = x.new_empty(batch, 1, height * 2, width * 2).normal_().detach() * 0.0
-
- out = conv1(out, adastyles[:, _index+6], noise=noise)
- out = conv2(out, adastyles[:, _index+7], noise=noise)
- skip = to_rgb(out, adastyles[:, _index+8], skip)
- _index += 2
-
- image = skip
- if return_mask and self.backbone == 'dualstylegan':
- return image, m_Es
- return image
-
- def stylegan(self):
- if self.backbone == 'dualstylegan':
- return self.generator.generator
- else:
- return self.generator
-
- def zplus2wplus(self, zplus):
- return self.stylegan().style(zplus.reshape(zplus.shape[0]*zplus.shape[1], zplus.shape[2])).reshape(zplus.shape)
\ No newline at end of file
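As a quick numeric check of the encoder schedule in the VToonify constructor above, the defaults `in_size=256` and `out_size=1024` give 18 W+ latents and four encoder resolutions; the snippet just reproduces the two expressions, nothing model-specific:

```python
import numpy as np

in_size, out_size = 256, 1024
num_styles = int(np.log2(out_size)) * 2 - 2                        # 18 latents for a 1024px StyleGAN2
encoder_res = [2 ** i for i in range(int(np.log2(in_size)), 4, -1)]
print(num_styles)    # 18
print(encoder_res)   # [256, 128, 64, 32]
```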
diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/utils/pil_utils.py b/spaces/Jackflack09/diffuse-custom/diffusers/utils/pil_utils.py
deleted file mode 100644
index 39d0a15a4e2fe39fecb01951b36c43368492f983..0000000000000000000000000000000000000000
--- a/spaces/Jackflack09/diffuse-custom/diffusers/utils/pil_utils.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import PIL.Image
-import PIL.ImageOps
-from packaging import version
-
-
-if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
- PIL_INTERPOLATION = {
- "linear": PIL.Image.Resampling.BILINEAR,
- "bilinear": PIL.Image.Resampling.BILINEAR,
- "bicubic": PIL.Image.Resampling.BICUBIC,
- "lanczos": PIL.Image.Resampling.LANCZOS,
- "nearest": PIL.Image.Resampling.NEAREST,
- }
-else:
- PIL_INTERPOLATION = {
- "linear": PIL.Image.LINEAR,
- "bilinear": PIL.Image.BILINEAR,
- "bicubic": PIL.Image.BICUBIC,
- "lanczos": PIL.Image.LANCZOS,
- "nearest": PIL.Image.NEAREST,
- }
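Typical use of the lookup above would be something like the following; the image size and the `"bicubic"` key are arbitrary, and the snippet assumes the `PIL_INTERPOLATION` dict defined in this module is in scope:

```python
import PIL.Image

img = PIL.Image.new("RGB", (64, 64))
# the dict hides the Pillow >= 9.1 rename of the resampling constants behind one lookup
resized = img.resize((128, 128), resample=PIL_INTERPOLATION["bicubic"])
print(resized.size)  # (128, 128)
```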
diff --git a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/F0Predictor.py
deleted file mode 100644
index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000
--- a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/F0Predictor.py
+++ /dev/null
@@ -1,16 +0,0 @@
-class F0Predictor(object):
- def compute_f0(self, wav, p_len):
- """
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length]
- """
- pass
-
- def compute_f0_uv(self, wav, p_len):
- """
- input: wav:[signal_length]
- p_len:int
- output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
- """
- pass
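A minimal concrete subclass, only to illustrate the shapes the interface above promises; the all-zero (unvoiced) output and the `hop_length`/`sampling_rate` attributes are illustrative assumptions, not requirements of the base class:

```python
import numpy as np

class DummyF0Predictor(F0Predictor):
    def __init__(self, hop_length=512, sampling_rate=44100):
        self.hop_length = hop_length
        self.sampling_rate = sampling_rate

    def compute_f0(self, wav, p_len):
        # return an unvoiced (all-zero) f0 contour of the requested length
        return np.zeros(p_len, dtype=np.float32)

    def compute_f0_uv(self, wav, p_len):
        f0 = self.compute_f0(wav, p_len)
        uv = np.zeros_like(f0)  # 0 = unvoiced, 1 = voiced
        return f0, uv
```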
diff --git a/spaces/Jamkonams/AutoGPT/autogpt/agent/agent.py b/spaces/Jamkonams/AutoGPT/autogpt/agent/agent.py
deleted file mode 100644
index ee7885f8844022597321fa6b492430ec34c0d6b9..0000000000000000000000000000000000000000
--- a/spaces/Jamkonams/AutoGPT/autogpt/agent/agent.py
+++ /dev/null
@@ -1,197 +0,0 @@
-from colorama import Fore, Style
-
-from autogpt.app import execute_command, get_command
-from autogpt.chat import chat_with_ai, create_chat_message
-from autogpt.config import Config
-from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
-from autogpt.json_utils.utilities import validate_json
-from autogpt.logs import logger, print_assistant_thoughts
-from autogpt.speech import say_text
-from autogpt.spinner import Spinner
-from autogpt.utils import clean_input
-
-
-class Agent:
- """Agent class for interacting with Auto-GPT.
-
- Attributes:
- ai_name: The name of the agent.
- memory: The memory object to use.
- full_message_history: The full message history.
- next_action_count: The number of actions to execute.
- system_prompt: The system prompt is the initial prompt that defines everything the AI needs to know to achieve its task successfully.
-            Currently, the dynamic and customizable parts of the system prompt are the ai_name, description and goals.
-
- triggering_prompt: The last sentence the AI will see before answering. For Auto-GPT, this prompt is:
- Determine which next command to use, and respond using the format specified above:
-        The triggering prompt is kept separate from the system prompt because the contextual information placed
-        between them can distract the AI and make it forget that its goal is to determine the next task.
- SYSTEM PROMPT
- CONTEXTUAL INFORMATION (memory, previous conversations, anything relevant)
- TRIGGERING PROMPT
-
- The triggering prompt reminds the AI about its short term meta task (defining the next task)
- """
-
- def __init__(
- self,
- ai_name,
- memory,
- full_message_history,
- next_action_count,
- system_prompt,
- triggering_prompt,
- ):
- self.ai_name = ai_name
- self.memory = memory
- self.full_message_history = full_message_history
- self.next_action_count = next_action_count
- self.system_prompt = system_prompt
- self.triggering_prompt = triggering_prompt
-
- def start_interaction_loop(self):
- # Interaction Loop
- cfg = Config()
- loop_count = 0
- command_name = None
- arguments = None
- user_input = ""
-
- while True:
- # Discontinue if continuous limit is reached
- loop_count += 1
- if (
- cfg.continuous_mode
- and cfg.continuous_limit > 0
- and loop_count > cfg.continuous_limit
- ):
- logger.typewriter_log(
- "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
- )
- break
-
- # Send message to AI, get response
- with Spinner("Thinking... "):
- assistant_reply = chat_with_ai(
- self.system_prompt,
- self.triggering_prompt,
- self.full_message_history,
- self.memory,
- cfg.fast_token_limit,
- ) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
-
- assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
-
- # Print Assistant thoughts
- if assistant_reply_json != {}:
- validate_json(assistant_reply_json, "llm_response_format_1")
- # Get command name and arguments
- try:
- print_assistant_thoughts(self.ai_name, assistant_reply_json)
- command_name, arguments = get_command(assistant_reply_json)
- # command_name, arguments = assistant_reply_json_valid["command"]["name"], assistant_reply_json_valid["command"]["args"]
- if cfg.speak_mode:
- say_text(f"I want to execute {command_name}")
- except Exception as e:
- logger.error("Error: \n", str(e))
-
- if not cfg.continuous_mode and self.next_action_count == 0:
- ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
- # Get key press: Prompt the user to press enter to continue or escape
- # to exit
- logger.typewriter_log(
- "NEXT ACTION: ",
- Fore.CYAN,
- f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
- f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
- )
- print(
- "Enter 'y' to authorise command, 'y -N' to run N continuous "
- "commands, 'n' to exit program, or enter feedback for "
- f"{self.ai_name}...",
- flush=True,
- )
- while True:
- console_input = clean_input(
- Fore.MAGENTA + "Input:" + Style.RESET_ALL
- )
- if console_input.lower().strip() == "y":
- user_input = "GENERATE NEXT COMMAND JSON"
- break
- elif console_input.lower().strip() == "":
- print("Invalid input format.")
- continue
- elif console_input.lower().startswith("y -"):
- try:
- self.next_action_count = abs(
- int(console_input.split(" ")[1])
- )
- user_input = "GENERATE NEXT COMMAND JSON"
- except ValueError:
- print(
- "Invalid input format. Please enter 'y -n' where n is"
- " the number of continuous tasks."
- )
- continue
- break
- elif console_input.lower() == "n":
- user_input = "EXIT"
- break
- else:
- user_input = console_input
- command_name = "human_feedback"
- break
-
- if user_input == "GENERATE NEXT COMMAND JSON":
- logger.typewriter_log(
- "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
- Fore.MAGENTA,
- "",
- )
- elif user_input == "EXIT":
- print("Exiting...", flush=True)
- break
- else:
- # Print command
- logger.typewriter_log(
- "NEXT ACTION: ",
- Fore.CYAN,
- f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
- f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
- )
-
- # Execute command
- if command_name is not None and command_name.lower().startswith("error"):
- result = (
- f"Command {command_name} threw the following error: {arguments}"
- )
- elif command_name == "human_feedback":
- result = f"Human feedback: {user_input}"
- else:
- result = (
- f"Command {command_name} returned: "
- f"{execute_command(command_name, arguments)}"
- )
- if self.next_action_count > 0:
- self.next_action_count -= 1
-
- memory_to_add = (
- f"Assistant Reply: {assistant_reply} "
- f"\nResult: {result} "
- f"\nHuman Feedback: {user_input} "
- )
-
- self.memory.add(memory_to_add)
-
- # Check if there's a result from the command append it to the message
- # history
- if result is not None:
- self.full_message_history.append(create_chat_message("system", result))
- logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
- else:
- self.full_message_history.append(
- create_chat_message("system", "Unable to execute command")
- )
- logger.typewriter_log(
- "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
- )
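The authorization branch above mixes parsing with console I/O; extracted on its own, the input rules look roughly like this sketch (not code from the file): `'y'` authorizes one command, `'y -N'` authorizes N continuous commands, `'n'` exits, and anything else becomes human feedback.

```python
def parse_console_input(console_input: str):
    text = console_input.lower().strip()
    if text == "y":
        return "GENERATE NEXT COMMAND JSON", 0
    if text.startswith("y -"):
        try:
            return "GENERATE NEXT COMMAND JSON", abs(int(console_input.split(" ")[1]))
        except (ValueError, IndexError):
            return None, 0          # invalid 'y -n' format, caller should re-prompt
    if text == "n":
        return "EXIT", 0
    return console_input, 0         # treated as human feedback

print(parse_console_input("y -5"))  # ('GENERATE NEXT COMMAND JSON', 5)
```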
diff --git a/spaces/JoYCC/ICBU-NPU-FashionGPT-70B-V1.1/README.md b/spaces/JoYCC/ICBU-NPU-FashionGPT-70B-V1.1/README.md
deleted file mode 100644
index 9da135f813f5c02d180770f12f1b635a523454e3..0000000000000000000000000000000000000000
--- a/spaces/JoYCC/ICBU-NPU-FashionGPT-70B-V1.1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ICBU NPU FashionGPT 70B V1.1
-emoji: ⚡
-colorFrom: purple
-colorTo: purple
-sdk: gradio
-sdk_version: 3.44.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/chat_func.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/chat_func.py
deleted file mode 100644
index 4c635c51ada4a852d1495646cf81120de15af7b9..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/chat_func.py
+++ /dev/null
@@ -1,497 +0,0 @@
-# -*- coding:utf-8 -*-
-from __future__ import annotations
-from typing import TYPE_CHECKING, List
-
-import logging
-import json
-import os
-import requests
-import urllib3
-
-from tqdm import tqdm
-import colorama
-from duckduckgo_search import ddg
-import asyncio
-import aiohttp
-
-
-from modules.presets import *
-from modules.llama_func import *
-from modules.utils import *
-from . import shared
-from modules.config import retrieve_proxy
-
-# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
-
-if TYPE_CHECKING:
- from typing import TypedDict
-
- class DataframeData(TypedDict):
- headers: List[str]
- data: List[List[str | int | bool]]
-
-
-initial_prompt = "You are a helpful assistant."
-HISTORY_DIR = "history"
-TEMPLATES_DIR = "templates"
-
-@shared.state.switching_api_key # this decorator has no effect unless multi-account (multi API key) mode is enabled
-def get_response(
- openai_api_key, system_prompt, history, temperature, top_p, stream, selected_model
-):
- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {openai_api_key}",
- }
-
- history = [construct_system(system_prompt), *history]
-
- payload = {
- "model": selected_model,
- "messages": history, # [{"role": "user", "content": f"{inputs}"}],
- "temperature": temperature, # 1.0,
- "top_p": top_p, # 1.0,
- "n": 1,
- "stream": stream,
- "presence_penalty": 0,
- "frequency_penalty": 0,
- }
- if stream:
- timeout = timeout_streaming
- else:
- timeout = timeout_all
-
-
-    # if a custom api-host is configured, send the request to it; otherwise use the default endpoint
- if shared.state.completion_url != COMPLETION_URL:
- logging.info(f"使用自定义API URL: {shared.state.completion_url}")
-
- with retrieve_proxy():
- response = requests.post(
- shared.state.completion_url,
- headers=headers,
- json=payload,
- stream=True,
- timeout=timeout,
- )
-
- return response
-
-
-def stream_predict(
- openai_api_key,
- system_prompt,
- history,
- inputs,
- chatbot,
- all_token_counts,
- top_p,
- temperature,
- selected_model,
- fake_input=None,
- display_append=""
-):
- def get_return_value():
- return chatbot, history, status_text, all_token_counts
-
- logging.info("实时回答模式")
- partial_words = ""
- counter = 0
- status_text = "开始实时传输回答……"
- history.append(construct_user(inputs))
- history.append(construct_assistant(""))
- if fake_input:
- chatbot.append((fake_input, ""))
- else:
- chatbot.append((inputs, ""))
- user_token_count = 0
- if fake_input is not None:
- input_token_count = count_token(construct_user(fake_input))
- else:
- input_token_count = count_token(construct_user(inputs))
- if len(all_token_counts) == 0:
- system_prompt_token_count = count_token(construct_system(system_prompt))
- user_token_count = (
- input_token_count + system_prompt_token_count
- )
- else:
- user_token_count = input_token_count
- all_token_counts.append(user_token_count)
- logging.info(f"输入token计数: {user_token_count}")
- yield get_return_value()
- try:
- response = get_response(
- openai_api_key,
- system_prompt,
- history,
- temperature,
- top_p,
- True,
- selected_model,
- )
- except requests.exceptions.ConnectTimeout:
- status_text = (
- standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
- )
- yield get_return_value()
- return
- except requests.exceptions.ReadTimeout:
- status_text = standard_error_msg + read_timeout_prompt + error_retrieve_prompt
- yield get_return_value()
- return
-
- yield get_return_value()
- error_json_str = ""
-
- if fake_input is not None:
- history[-2] = construct_user(fake_input)
- for chunk in tqdm(response.iter_lines()):
- if counter == 0:
- counter += 1
- continue
- counter += 1
- # check whether each line is non-empty
- if chunk:
- chunk = chunk.decode()
- chunklength = len(chunk)
- try:
- chunk = json.loads(chunk[6:])
- except json.JSONDecodeError:
- logging.info(chunk)
- error_json_str += chunk
- status_text = f"JSON解析错误。请重置对话。收到的内容: {error_json_str}"
- yield get_return_value()
- continue
- # decode each line as response data is in bytes
- if chunklength > 6 and "delta" in chunk["choices"][0]:
- finish_reason = chunk["choices"][0]["finish_reason"]
- status_text = construct_token_message(all_token_counts)
- if finish_reason == "stop":
- yield get_return_value()
- break
- try:
- partial_words = (
- partial_words + chunk["choices"][0]["delta"]["content"]
- )
- except KeyError:
- status_text = (
- standard_error_msg
- + "API回复中找不到内容。很可能是Token计数达到上限了。请重置对话。当前Token计数: "
- + str(sum(all_token_counts))
- )
- yield get_return_value()
- break
- history[-1] = construct_assistant(partial_words)
- chatbot[-1] = (chatbot[-1][0], partial_words+display_append)
- all_token_counts[-1] += 1
- yield get_return_value()
-
-
-def predict_all(
- openai_api_key,
- system_prompt,
- history,
- inputs,
- chatbot,
- all_token_counts,
- top_p,
- temperature,
- selected_model,
- fake_input=None,
- display_append=""
-):
- logging.info("一次性回答模式")
- history.append(construct_user(inputs))
- history.append(construct_assistant(""))
- if fake_input:
- chatbot.append((fake_input, ""))
- else:
- chatbot.append((inputs, ""))
- if fake_input is not None:
- all_token_counts.append(count_token(construct_user(fake_input)))
- else:
- all_token_counts.append(count_token(construct_user(inputs)))
- try:
- response = get_response(
- openai_api_key,
- system_prompt,
- history,
- temperature,
- top_p,
- False,
- selected_model,
- )
- except requests.exceptions.ConnectTimeout:
- status_text = (
- standard_error_msg + connection_timeout_prompt + error_retrieve_prompt
- )
- return chatbot, history, status_text, all_token_counts
- except requests.exceptions.ProxyError:
- status_text = standard_error_msg + proxy_error_prompt + error_retrieve_prompt
- return chatbot, history, status_text, all_token_counts
- except requests.exceptions.SSLError:
- status_text = standard_error_msg + ssl_error_prompt + error_retrieve_prompt
- return chatbot, history, status_text, all_token_counts
- response = json.loads(response.text)
- if fake_input is not None:
- history[-2] = construct_user(fake_input)
- try:
- content = response["choices"][0]["message"]["content"]
- history[-1] = construct_assistant(content)
- chatbot[-1] = (chatbot[-1][0], content+display_append)
- total_token_count = response["usage"]["total_tokens"]
- if fake_input is not None:
- all_token_counts[-1] += count_token(construct_assistant(content))
- else:
- all_token_counts[-1] = total_token_count - sum(all_token_counts)
- status_text = construct_token_message(total_token_count)
- return chatbot, history, status_text, all_token_counts
- except KeyError:
- status_text = standard_error_msg + str(response)
- return chatbot, history, status_text, all_token_counts
-
-
-def predict(
- openai_api_key,
- system_prompt,
- history,
- inputs,
- chatbot,
- all_token_counts,
- top_p,
- temperature,
- stream=False,
- selected_model=MODELS[0],
- use_websearch=False,
- files = None,
- reply_language="中文",
- should_check_token_count=True,
-): # repetition_penalty, top_k
- from llama_index.indices.vector_store.base_query import GPTVectorStoreIndexQuery
- from llama_index.indices.query.schema import QueryBundle
- from langchain.llms import OpenAIChat
-
-
- logging.info("输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL)
- if should_check_token_count:
- yield chatbot+[(inputs, "")], history, "开始生成回答……", all_token_counts
- if reply_language == "跟随问题语言(不稳定)":
- reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
- old_inputs = None
- display_reference = []
- limited_context = False
- if files:
- limited_context = True
- old_inputs = inputs
- msg = "加载索引中……(这可能需要几分钟)"
- logging.info(msg)
- yield chatbot+[(inputs, "")], history, msg, all_token_counts
- index = construct_index(openai_api_key, file_src=files)
- msg = "索引构建完成,获取回答中……"
- logging.info(msg)
- yield chatbot+[(inputs, "")], history, msg, all_token_counts
- with retrieve_proxy():
- llm_predictor = LLMPredictor(llm=OpenAIChat(temperature=0, model_name=selected_model))
- prompt_helper = PromptHelper(max_input_size = 4096, num_output = 5, max_chunk_overlap = 20, chunk_size_limit=600)
- from llama_index import ServiceContext
- service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
- query_object = GPTVectorStoreIndexQuery(index.index_struct, service_context=service_context, similarity_top_k=5, vector_store=index._vector_store, docstore=index._docstore)
- query_bundle = QueryBundle(inputs)
- nodes = query_object.retrieve(query_bundle)
- reference_results = [n.node.text for n in nodes]
- reference_results = add_source_numbers(reference_results, use_source=False)
- display_reference = add_details(reference_results)
- display_reference = "\n\n" + "".join(display_reference)
- inputs = (
- replace_today(PROMPT_TEMPLATE)
- .replace("{query_str}", inputs)
- .replace("{context_str}", "\n\n".join(reference_results))
- .replace("{reply_language}", reply_language )
- )
- elif use_websearch:
- limited_context = True
- search_results = ddg(inputs, max_results=5)
- old_inputs = inputs
- reference_results = []
- for idx, result in enumerate(search_results):
- logging.info(f"搜索结果{idx + 1}:{result}")
- domain_name = urllib3.util.parse_url(result["href"]).host
- reference_results.append([result["body"], result["href"]])
- display_reference.append(f"{idx+1}. [{domain_name}]({result['href']})\n")
- reference_results = add_source_numbers(reference_results)
- display_reference = "\n\n" + "".join(display_reference)
- inputs = (
- replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
- .replace("{query}", inputs)
- .replace("{web_results}", "\n\n".join(reference_results))
- .replace("{reply_language}", reply_language )
- )
- else:
- display_reference = ""
-
- if len(openai_api_key) == 0 and not shared.state.multi_api_key:
- status_text = standard_error_msg + no_apikey_msg
- logging.info(status_text)
- chatbot.append((inputs, ""))
- if len(history) == 0:
- history.append(construct_user(inputs))
- history.append("")
- all_token_counts.append(0)
- else:
- history[-2] = construct_user(inputs)
- yield chatbot+[(inputs, "")], history, status_text, all_token_counts
- return
- elif len(inputs.strip()) == 0:
- status_text = standard_error_msg + no_input_msg
- logging.info(status_text)
- yield chatbot+[(inputs, "")], history, status_text, all_token_counts
- return
-
- if stream:
- logging.info("使用流式传输")
- iter = stream_predict(
- openai_api_key,
- system_prompt,
- history,
- inputs,
- chatbot,
- all_token_counts,
- top_p,
- temperature,
- selected_model,
- fake_input=old_inputs,
- display_append=display_reference
- )
- for chatbot, history, status_text, all_token_counts in iter:
- if shared.state.interrupted:
- shared.state.recover()
- return
- yield chatbot, history, status_text, all_token_counts
- else:
- logging.info("不使用流式传输")
- chatbot, history, status_text, all_token_counts = predict_all(
- openai_api_key,
- system_prompt,
- history,
- inputs,
- chatbot,
- all_token_counts,
- top_p,
- temperature,
- selected_model,
- fake_input=old_inputs,
- display_append=display_reference
- )
- yield chatbot, history, status_text, all_token_counts
-
- logging.info(f"传输完毕。当前token计数为{all_token_counts}")
- if len(history) > 1 and history[-1]["content"] != inputs:
- logging.info(
- "回答为:"
- + colorama.Fore.BLUE
- + f"{history[-1]['content']}"
- + colorama.Style.RESET_ALL
- )
-
- if limited_context:
- history = history[-4:]
- all_token_counts = all_token_counts[-2:]
- yield chatbot, history, status_text, all_token_counts
-
- if stream:
- max_token = MODEL_SOFT_TOKEN_LIMIT[selected_model]["streaming"]
- else:
- max_token = MODEL_SOFT_TOKEN_LIMIT[selected_model]["all"]
-
- if sum(all_token_counts) > max_token and should_check_token_count:
- print(all_token_counts)
- count = 0
- while sum(all_token_counts) > max_token - 500 and sum(all_token_counts) > 0:
- count += 1
- del all_token_counts[0]
- del history[:2]
- logging.info(status_text)
- status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
- yield chatbot, history, status_text, all_token_counts
-
-
-def retry(
- openai_api_key,
- system_prompt,
- history,
- chatbot,
- token_count,
- top_p,
- temperature,
- stream=False,
- selected_model=MODELS[0],
- reply_language="中文",
-):
- logging.info("重试中……")
- if len(history) == 0:
- yield chatbot, history, f"{standard_error_msg}上下文是空的", token_count
- return
- history.pop()
- inputs = history.pop()["content"]
- token_count.pop()
- iter = predict(
- openai_api_key,
- system_prompt,
- history,
- inputs,
- chatbot,
- token_count,
- top_p,
- temperature,
- stream=stream,
- selected_model=selected_model,
- reply_language=reply_language,
- )
- logging.info("重试中……")
- for x in iter:
- yield x
- logging.info("重试完毕")
-
-
-def reduce_token_size(
- openai_api_key,
- system_prompt,
- history,
- chatbot,
- token_count,
- top_p,
- temperature,
- max_token_count,
- selected_model=MODELS[0],
- reply_language="中文",
-):
- logging.info("开始减少token数量……")
- iter = predict(
- openai_api_key,
- system_prompt,
- history,
- summarize_prompt,
- chatbot,
- token_count,
- top_p,
- temperature,
- selected_model=selected_model,
- should_check_token_count=False,
- reply_language=reply_language,
- )
- logging.info(f"chatbot: {chatbot}")
- flag = False
- for chatbot, history, status_text, previous_token_count in iter:
- num_chat = find_n(previous_token_count, max_token_count)
- logging.info(f"previous_token_count: {previous_token_count}, keeping {num_chat} chats")
- if flag:
- chatbot = chatbot[:-1]
- flag = True
- history = history[-2*num_chat:] if num_chat > 0 else []
- token_count = previous_token_count[-num_chat:] if num_chat > 0 else []
- msg = f"保留了最近{num_chat}轮对话"
- yield chatbot, history, msg + "," + construct_token_message(
- token_count if len(token_count) > 0 else [0],
- ), token_count
- logging.info(msg)
- logging.info("减少token数量完毕")
diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/overwrites.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/overwrites.py
deleted file mode 100644
index a4ef6167eb7ce75ed8b88024ad1187b24f2fc191..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/overwrites.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from __future__ import annotations
-import logging
-
-from typing import List, Tuple
-from gradio_client import utils as client_utils
-from gradio import utils
-import inspect
-
-from modules.presets import *
-from modules.index_func import *
-
-
-def postprocess(
- self,
- y: List[List[str | Tuple[str] | Tuple[str, str] | None] | Tuple],
- ) -> List[List[str | Dict | None]]:
- """
- Parameters:
- y: List of lists representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. It can also be a tuple whose first element is a string filepath or URL to an image/video/audio, and second (optional) element is the alt text, in which case the media file is displayed. It can also be None, in which case that message is not displayed.
- Returns:
- List of lists representing the message and response. Each message and response will be a string of HTML, or a dictionary with media information. Or None if the message is not to be displayed.
- """
- if y is None:
- return []
- processed_messages = []
- for message_pair in y:
- assert isinstance(
- message_pair, (tuple, list)
- ), f"Expected a list of lists or list of tuples. Received: {message_pair}"
- assert (
- len(message_pair) == 2
- ), f"Expected a list of lists of length 2 or list of tuples of length 2. Received: {message_pair}"
-
- processed_messages.append(
- [
- self._postprocess_chat_messages(message_pair[0], "user"),
- self._postprocess_chat_messages(message_pair[1], "bot"),
- ]
- )
- return processed_messages
-
-def postprocess_chat_messages(
- self, chat_message: str | tuple | list | None, role: str
- ) -> str | dict | None:
- if chat_message is None:
- return None
- elif isinstance(chat_message, (tuple, list)):
- file_uri = chat_message[0]
- if utils.validate_url(file_uri):
- filepath = file_uri
- else:
- filepath = self.make_temp_copy_if_needed(file_uri)
-
- mime_type = client_utils.get_mimetype(filepath)
- return {
- "name": filepath,
- "mime_type": mime_type,
- "alt_text": chat_message[1] if len(chat_message) > 1 else None,
- "data": None, # These last two fields are filled in by the frontend
- "is_file": True,
- }
- elif isinstance(chat_message, str):
- # chat_message = inspect.cleandoc(chat_message)
- # escape html spaces
- # chat_message = chat_message.replace(" ", " ")
- if role == "bot":
- chat_message = convert_bot_before_marked(chat_message)
- elif role == "user":
- chat_message = convert_user_before_marked(chat_message)
- return chat_message
- else:
- raise ValueError(f"Invalid message for Chatbot component: {chat_message}")
-
-
-
-def add_classes_to_gradio_component(comp):
- """
- this adds gradio-* to the component for css styling (ie gradio-button to gr.Button), as well as some others
- code from stable-diffusion-webui
- """
-
- comp.elem_classes = [f"gradio-{comp.get_block_name()}", *(comp.elem_classes or [])]
-
- if getattr(comp, 'multiselect', False):
- comp.elem_classes.append('multiselect')
-
-
-def IOComponent_init(self, *args, **kwargs):
- res = original_IOComponent_init(self, *args, **kwargs)
- add_classes_to_gradio_component(self)
-
- return res
-
-original_IOComponent_init = gr.components.IOComponent.__init__
-gr.components.IOComponent.__init__ = IOComponent_init
-
-
-def BlockContext_init(self, *args, **kwargs):
- res = original_BlockContext_init(self, *args, **kwargs)
- add_classes_to_gradio_component(self)
-
- return res
-
-original_BlockContext_init = gr.blocks.BlockContext.__init__
-gr.blocks.BlockContext.__init__ = BlockContext_init
-
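The two patches above follow the same wrap-and-reassign pattern; here it is on a plain class so it runs without gradio installed (the class and attribute names are invented for the demo):

```python
class Widget:
    def __init__(self, name):
        self.name = name
        self.elem_classes = []

original_widget_init = Widget.__init__

def widget_init_with_classes(self, *args, **kwargs):
    res = original_widget_init(self, *args, **kwargs)
    # post-hook: tag every new instance with an extra CSS-style class
    self.elem_classes.append(f"gradio-{type(self).__name__.lower()}")
    return res

Widget.__init__ = widget_init_with_classes
print(Widget("slider").elem_classes)  # ['gradio-widget']
```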
diff --git a/spaces/JunchuanYu/Sydney-AI/README.md b/spaces/JunchuanYu/Sydney-AI/README.md
deleted file mode 100644
index da224438be289e887e7e0c6ce434df5c6daa56e0..0000000000000000000000000000000000000000
--- a/spaces/JunchuanYu/Sydney-AI/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Sydney AI
-emoji: 🌖
-colorFrom: gray
-colorTo: blue
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
deleted file mode 100644
index 59229e0b0b0a18dff81abca6f5c20cb50b0d542c..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Optional, Tuple
-
-import torch
-from torch import Tensor
-
-from mmdet.registry import MODELS
-from mmdet.utils import ConfigType, OptMultiConfig
-from .base_roi_extractor import BaseRoIExtractor
-
-
-@MODELS.register_module()
-class SingleRoIExtractor(BaseRoIExtractor):
- """Extract RoI features from a single level feature map.
-
- If there are multiple input feature levels, each RoI is mapped to a level
- according to its scale. The mapping rule is proposed in
-    `FPN <https://arxiv.org/abs/1612.03144>`_.
-
- Args:
- roi_layer (:obj:`ConfigDict` or dict): Specify RoI layer type and
- arguments.
- out_channels (int): Output channels of RoI layers.
- featmap_strides (List[int]): Strides of input feature maps.
- finest_scale (int): Scale threshold of mapping to level 0.
- Defaults to 56.
- init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \
- dict], optional): Initialization config dict. Defaults to None.
- """
-
- def __init__(self,
- roi_layer: ConfigType,
- out_channels: int,
- featmap_strides: List[int],
- finest_scale: int = 56,
- init_cfg: OptMultiConfig = None) -> None:
- super().__init__(
- roi_layer=roi_layer,
- out_channels=out_channels,
- featmap_strides=featmap_strides,
- init_cfg=init_cfg)
- self.finest_scale = finest_scale
-
- def map_roi_levels(self, rois: Tensor, num_levels: int) -> Tensor:
- """Map rois to corresponding feature levels by scales.
-
- - scale < finest_scale * 2: level 0
- - finest_scale * 2 <= scale < finest_scale * 4: level 1
- - finest_scale * 4 <= scale < finest_scale * 8: level 2
- - scale >= finest_scale * 8: level 3
-
- Args:
- rois (Tensor): Input RoIs, shape (k, 5).
- num_levels (int): Total level number.
-
- Returns:
- Tensor: Level index (0-based) of each RoI, shape (k, )
- """
- scale = torch.sqrt(
- (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
- target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
- target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
- return target_lvls
-
- def forward(self,
- feats: Tuple[Tensor],
- rois: Tensor,
- roi_scale_factor: Optional[float] = None):
- """Extractor ROI feats.
-
- Args:
- feats (Tuple[Tensor]): Multi-scale features.
- rois (Tensor): RoIs with the shape (n, 5) where the first
- column indicates batch id of each RoI.
- roi_scale_factor (Optional[float]): RoI scale factor.
- Defaults to None.
-
- Returns:
- Tensor: RoI feature.
- """
- # convert fp32 to fp16 when amp is on
- rois = rois.type_as(feats[0])
- out_size = self.roi_layers[0].output_size
- num_levels = len(feats)
- roi_feats = feats[0].new_zeros(
- rois.size(0), self.out_channels, *out_size)
-
- # TODO: remove this when parrots supports
- if torch.__version__ == 'parrots':
- roi_feats.requires_grad = True
-
- if num_levels == 1:
- if len(rois) == 0:
- return roi_feats
- return self.roi_layers[0](feats[0], rois)
-
- target_lvls = self.map_roi_levels(rois, num_levels)
-
- if roi_scale_factor is not None:
- rois = self.roi_rescale(rois, roi_scale_factor)
-
- for i in range(num_levels):
- mask = target_lvls == i
- inds = mask.nonzero(as_tuple=False).squeeze(1)
- if inds.numel() > 0:
- rois_ = rois[inds]
- roi_feats_t = self.roi_layers[i](feats[i], rois_)
- roi_feats[inds] = roi_feats_t
- else:
- # Sometimes some pyramid levels will not be used for RoI
- # feature extraction and this will cause an incomplete
- # computation graph in one GPU, which is different from those
- # in other GPUs and will cause a hanging error.
- # Therefore, we add it to ensure each feature pyramid is
- # included in the computation graph to avoid runtime bugs.
- roi_feats += sum(
- x.view(-1)[0]
- for x in self.parameters()) * 0. + feats[i].sum() * 0.
- return roi_feats
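A quick numeric check of the scale-to-level rule documented in `map_roi_levels` above, reproduced standalone with a few hand-picked RoIs; the box sizes are chosen only to land in each band (`finest_scale=56`, four levels):

```python
import torch

def map_roi_levels(rois, num_levels, finest_scale=56):
    scale = torch.sqrt((rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
    target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
    return target_lvls.clamp(min=0, max=num_levels - 1).long()

# rois are (batch_idx, x1, y1, x2, y2)
rois = torch.tensor([
    [0.,   0.,   0.,  64.,  64.],   # scale  64 -> level 0 (< 112)
    [0.,   0.,   0., 150., 150.],   # scale 150 -> level 1 (112..224)
    [0.,   0.,   0., 300., 300.],   # scale 300 -> level 2 (224..448)
    [0.,   0.,   0., 600., 600.],   # scale 600 -> level 3 (>= 448)
])
print(map_roi_levels(rois, num_levels=4))  # tensor([0, 1, 2, 3])
```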
diff --git a/spaces/LINOlk/Akak/Dockerfile b/spaces/LINOlk/Akak/Dockerfile
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Lamai/LAMAIGPT/autogpt/commands/analyze_code.py b/spaces/Lamai/LAMAIGPT/autogpt/commands/analyze_code.py
deleted file mode 100644
index e02ea4c5b4ba53530e559d1cab7a07b8e3c7c638..0000000000000000000000000000000000000000
--- a/spaces/Lamai/LAMAIGPT/autogpt/commands/analyze_code.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""Code evaluation module."""
-from __future__ import annotations
-
-from autogpt.llm_utils import call_ai_function
-
-
-def analyze_code(code: str) -> list[str]:
- """
-    A function that takes in a code string and returns a response from the create
-    chat completion API call.
-
- Parameters:
- code (str): Code to be evaluated.
- Returns:
- A result string from create chat completion. A list of suggestions to
- improve the code.
- """
-
- function_string = "def analyze_code(code: str) -> List[str]:"
- args = [code]
- description_string = (
- "Analyzes the given code and returns a list of suggestions" " for improvements."
- )
-
- return call_ai_function(function_string, args, description_string)
diff --git a/spaces/Lbin123/Lbingo/src/components/voice.tsx b/spaces/Lbin123/Lbingo/src/components/voice.tsx
deleted file mode 100644
index 074d0e145229947282a472bd84f6578cf0b3c71c..0000000000000000000000000000000000000000
--- a/spaces/Lbin123/Lbingo/src/components/voice.tsx
+++ /dev/null
@@ -1,52 +0,0 @@
-import React, { useEffect } from 'react'
-import { useSetAtom } from 'jotai'
-import { useBing } from '@/lib/hooks/use-bing'
-import Image from 'next/image'
-import VoiceIcon from '@/assets/images/voice.svg'
-import VoiceButton from './ui/voice'
-import { SR } from '@/lib/bots/bing/sr'
-import { voiceListenAtom } from '@/state'
-
-const sr = new SR(['发送', '清空', '退出'])
-
-const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick<ReturnType<typeof useBing>, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => {
- const setListen = useSetAtom(voiceListenAtom)
- useEffect(() => {
- if (sr.listening) return
- sr.transcript = !isSpeaking
- }, [isSpeaking])
-
- useEffect(() => {
- sr.onchange = (msg: string, command?: string) => {
- switch (command) {
- case '退出':
- sr.stop()
- break;
- case '发送':
- sendMessage(input)
- case '清空':
- setInput('')
- break;
- default:
- setInput(input + msg)
- }
- }
- }, [input])
-
- const switchSR = (enable: boolean = false) => {
- setListen(enable)
- if (enable) {
- sr.start()
- } else {
- sr.stop()
- }
- }
-
- return sr.listening ? (
-    <VoiceButton onClick={() => switchSR(false)} />
- ) : (
-    <VoiceButton onClick={() => switchSR(true)} />
- )
-};
-
-export default Voice;
diff --git a/spaces/Lewislou/Lewislou-cell-seg-sribd/sribd_cellseg_models.py b/spaces/Lewislou/Lewislou-cell-seg-sribd/sribd_cellseg_models.py
deleted file mode 100644
index 9c65523b2963a877e578cf49612a3eba194c05f3..0000000000000000000000000000000000000000
--- a/spaces/Lewislou/Lewislou-cell-seg-sribd/sribd_cellseg_models.py
+++ /dev/null
@@ -1,100 +0,0 @@
-
-import os
-join = os.path.join
-import argparse
-import numpy as np
-import torch
-import torch.nn as nn
-from collections import OrderedDict
-from torchvision import datasets, models, transforms
-from classifiers import resnet10, resnet18
-
-from utils_modify import sliding_window_inference,sliding_window_inference_large,__proc_np_hv
-from PIL import Image
-import torch.nn.functional as F
-from skimage import io, segmentation, morphology, measure, exposure
-import tifffile as tif
-from models.flexible_unet_convnext import FlexibleUNet_star,FlexibleUNet_hv
-from transformers import PretrainedConfig
-from typing import List
-from transformers import PreTrainedModel
-from huggingface_hub import PyTorchModelHubMixin
-from torch import nn
-class ModelConfig(PretrainedConfig):
- model_type = "cell_sribd"
- def __init__(
- self,
- version = 1,
- input_channels: int = 3,
- roi_size: int = 512,
- overlap: float = 0.5,
- device: str = 'cpu',
- **kwargs,
- ):
-
- self.device = device
- self.roi_size = (roi_size, roi_size)
- self.input_channels = input_channels
- self.overlap = overlap
- self.np_thres, self.ksize, self.overall_thres, self.obj_size_thres = 0.6, 15, 0.4, 100
- self.n_rays = 32
- self.sw_batch_size = 4
- self.num_classes= 4
- self.block_size = 2048
- self.min_overlap = 128
- self.context = 128
- super().__init__(**kwargs)
-
-
-class MultiStreamCellSegModel(PreTrainedModel):
- config_class = ModelConfig
- #print(config.input_channels)
- def __init__(self, config):
- super().__init__(config)
- #print(config.input_channels)
- self.config = config
- self.cls_model = resnet18()
- self.model0 = FlexibleUNet_star(in_channels=config.input_channels,out_channels=config.n_rays+1,backbone='convnext_small',pretrained=False,n_rays=config.n_rays,prob_out_channels=1,)
- self.model1 = FlexibleUNet_star(in_channels=config.input_channels,out_channels=config.n_rays+1,backbone='convnext_small',pretrained=False,n_rays=config.n_rays,prob_out_channels=1,)
- self.model2 = FlexibleUNet_star(in_channels=config.input_channels,out_channels=config.n_rays+1,backbone='convnext_small',pretrained=False,n_rays=config.n_rays,prob_out_channels=1,)
- self.model3 = FlexibleUNet_hv(in_channels=config.input_channels,out_channels=2+2,backbone='convnext_small',pretrained=False,n_rays=2,prob_out_channels=2,)
- self.preprocess=transforms.Compose([
- transforms.Resize(size=256),
- transforms.CenterCrop(size=224),
- transforms.ToTensor(),
- transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])])
- def load_checkpoints(self,checkpoints):
- self.cls_model.load_state_dict(checkpoints['cls_model'])
- self.model0.load_state_dict(checkpoints['class1_model']['model_state_dict'])
- self.model1.load_state_dict(checkpoints['class2_model']['model_state_dict'])
- self.model2.load_state_dict(checkpoints['class3_model']['model_state_dict'])
- self.model3.load_state_dict(checkpoints['class4_model'])
-
- def forward(self, pre_img_data):
- inputs=self.preprocess(Image.fromarray(pre_img_data)).unsqueeze(0)
- outputs = self.cls_model(inputs)
- _, preds = torch.max(outputs, 1)
- label=preds[0].cpu().numpy()
- test_npy01 = pre_img_data
- if label in [0,1,2]:
- if label == 0:
- output_label = sliding_window_inference_large(test_npy01,self.config.block_size,self.config.min_overlap,self.config.context, self.config.roi_size,self.config.sw_batch_size,predictor=self.model0,device=self.config.device)
- elif label == 1:
- output_label = sliding_window_inference_large(test_npy01,self.config.block_size,self.config.min_overlap,self.config.context, self.config.roi_size,self.config.sw_batch_size,predictor=self.model1,device=self.config.device)
- elif label == 2:
- output_label = sliding_window_inference_large(test_npy01,self.config.block_size,self.config.min_overlap,self.config.context, self.config.roi_size,self.config.sw_batch_size,predictor=self.model2,device=self.config.device)
- else:
- test_tensor = torch.from_numpy(np.expand_dims(test_npy01, 0)).permute(0, 3, 1, 2).type(torch.FloatTensor)
-
-            output_hv, output_np = sliding_window_inference(test_tensor, self.config.roi_size, self.config.sw_batch_size, self.model3, overlap=self.config.overlap,device=self.config.device)
- pred_dict = {'np': output_np, 'hv': output_hv}
- pred_dict = OrderedDict(
- [[k, v.permute(0, 2, 3, 1).contiguous()] for k, v in pred_dict.items()] # NHWC
- )
- pred_dict["np"] = F.softmax(pred_dict["np"], dim=-1)[..., 1:]
- pred_output = torch.cat(list(pred_dict.values()), -1).cpu().numpy() # NHW3
- pred_map = np.squeeze(pred_output) # HW3
- pred_inst = __proc_np_hv(pred_map, self.config.np_thres, self.config.ksize, self.config.overall_thres, self.config.obj_size_thres)
- raw_pred_shape = pred_inst.shape[:2]
- output_label = pred_inst
- return output_label
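-
-# A minimal usage sketch (illustrative; the ModelConfig arguments and the checkpoint path/layout
-# here are hypothetical and must match the keys expected by load_checkpoints):
-#
-#   config = ModelConfig()
-#   model = MultiStreamCellSegModel(config)
-#   model.load_checkpoints(torch.load("multistream_checkpoints.pth", map_location="cpu"))
-#   with torch.no_grad():
-#       instance_map = model(pre_img_data)  # HxW instance label map for the input image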
diff --git a/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-slider.min.js b/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-slider.min.js
deleted file mode 100644
index 7e62685763cf7668cfa8857fac0b27af2c277286..0000000000000000000000000000000000000000
--- a/spaces/LinkSoul/Chinese-LLaVa/static/js/bulma-slider.min.js
+++ /dev/null
@@ -1 +0,0 @@
-!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaSlider=e():t.bulmaSlider=e()}("undefined"!=typeof self?self:this,function(){return function(n){var r={};function i(t){if(r[t])return r[t].exports;var e=r[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,i),e.l=!0,e.exports}return i.m=n,i.c=r,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:n})},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=0)}([function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),n.d(e,"isString",function(){return l});var r=n(1),i=Object.assign||function(t){for(var e=1;e=l.length&&(s=!0)):s=!0),s&&(t.once&&(u[e]=null),t.callback(r))});-1!==u.indexOf(null);)u.splice(u.indexOf(null),1)}}]),e}();e.a=i}]).default});
\ No newline at end of file
diff --git a/spaces/LunchWithaLens/whichraptor/README.md b/spaces/LunchWithaLens/whichraptor/README.md
deleted file mode 100644
index 9591597eb64b616599773980970a723223da72cd..0000000000000000000000000000000000000000
--- a/spaces/LunchWithaLens/whichraptor/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Whichraptor
-emoji: 🐢
-colorFrom: green
-colorTo: purple
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/Manjushri/SDXL-1.0/app.py b/spaces/Manjushri/SDXL-1.0/app.py
deleted file mode 100644
index 47fa2ca2957b23a49396e71318fb4be7a02e5e28..0000000000000000000000000000000000000000
--- a/spaces/Manjushri/SDXL-1.0/app.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import gradio as gr
-import os
-import torch
-import numpy as np
-import modin.pandas as pd
-from PIL import Image
-from diffusers import DiffusionPipeline
-
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
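-# With a GPU, load fp16 SDXL base, refiner and latent-upscaler pipelines with xformers attention;
-# on CPU, fall back to fp32 base and refiner pipelines and torch.compile their UNets.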
-if torch.cuda.is_available():
- # Reduce CUDA memory fragmentation; PyTorch reads this allocator setting from the environment.
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:8000"
- torch.cuda.max_memory_allocated(device=device)
- torch.cuda.empty_cache()
-
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
- pipe.enable_xformers_memory_efficient_attention()
- pipe = pipe.to(device)
- torch.cuda.empty_cache()
-
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
- refiner.enable_xformers_memory_efficient_attention()
- refiner = refiner.to(device)
- torch.cuda.empty_cache()
-
- upscaler = DiffusionPipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16, use_safetensors=True)
- upscaler.enable_xformers_memory_efficient_attention()
- upscaler = upscaler.to(device)
- torch.cuda.empty_cache()
-else:
- pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True)
- pipe = pipe.to(device)
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True)
- refiner = refiner.to(device)
- refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
-
-def genie (prompt, negative_prompt, height, width, scale, steps, seed, upscaling, prompt_2, negative_prompt_2, high_noise_frac, n_steps):
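- # Generate latents with the base model, pass them to the refiner, and optionally upscale 2x.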
- # A seed of 0 leaves generation unseeded (random); any other value seeds the global torch generator.
- generator = None if seed == 0 else torch.manual_seed(seed)
- int_image = pipe(prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, num_inference_steps=steps, height=height, width=width, guidance_scale=scale, num_images_per_prompt=1, generator=generator, output_type="latent").images
- if upscaling == 'Yes':
- image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps, denoising_start=high_noise_frac).images[0] #num_inference_steps=n_steps,
- upscaled = upscaler(prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=5, guidance_scale=0).images[0]
- torch.cuda.empty_cache()
- return (image, upscaled)
- else:
- image = refiner(prompt=prompt, prompt_2=prompt_2, negative_prompt=negative_prompt, negative_prompt_2=negative_prompt_2, image=int_image, num_inference_steps=n_steps ,denoising_start=high_noise_frac).images[0]
- torch.cuda.empty_cache()
- return (image, image)
-
-gr.Interface(fn=genie, inputs=[gr.Textbox(label='What you want the AI to generate. 77 Token Limit. A Token is Any Word, Number, Symbol, or Punctuation. Everything Over 77 Will Be Truncated!'),
- gr.Textbox(label='What you Do Not want the AI to generate. 77 Token Limit'),
- gr.Slider(512, 1024, 768, step=128, label='Height'),
- gr.Slider(512, 1024, 768, step=128, label='Width'),
- gr.Slider(1, 15, 10, step=.25, label='Guidance Scale: How Closely the AI follows the Prompt'),
- gr.Slider(25, maximum=100, value=50, step=25, label='Number of Iterations'),
- gr.Slider(minimum=0, step=1, maximum=999999999999999999, randomize=True, label='Seed: 0 is Random'),
- gr.Radio(['Yes', 'No'], value='No', label='Upscale?'),
- gr.Textbox(label='Embedded Prompt'),
- gr.Textbox(label='Embedded Negative Prompt'),
- gr.Slider(minimum=.7, maximum=.99, value=.95, step=.01, label='Refiner Denoise Start %'),
- gr.Slider(minimum=1, maximum=100, value=100, step=1, label='Refiner Number of Iterations')],
- outputs=['image', 'image'],
- title="Stable Diffusion XL 1.0 GPU",
- description="SDXL 1.0 GPU. WARNING: Capable of producing NSFW (Softcore) images.",
- article = "If You Enjoyed this Demo and would like to Donate, you can send to any of these Wallets. BTC: bc1qzdm9j73mj8ucwwtsjx4x4ylyfvr6kp7svzjn84 3LWRoKYx6bCLnUrKEdnPo3FCSPQUSFDjFP DOGE: DK6LRc4gfefdCTRk9xPD239N31jh9GjKez SHIB (BEP20): 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 PayPal: https://www.paypal.me/ManjushriBodhisattva ETH: 0xbE8f2f3B71DFEB84E5F7E3aae1909d60658aB891 Code Monkey: Manjushri").launch(debug=True, max_threads=80)
diff --git a/spaces/Matthijs/mms-tts-demo/app.py b/spaces/Matthijs/mms-tts-demo/app.py
deleted file mode 100644
index 7ab5fd34984c3f1222e6215518b45b37144dff04..0000000000000000000000000000000000000000
--- a/spaces/Matthijs/mms-tts-demo/app.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import gradio as gr
-import numpy as np
-import torch
-import os
-import re
-import tempfile
-
-from transformers import VitsModel, VitsTokenizer
-
-
-models = {
- "English": VitsModel.from_pretrained("Matthijs/mms-tts-eng"),
- "German": VitsModel.from_pretrained("Matthijs/mms-tts-deu"),
- "Korean": VitsModel.from_pretrained("Matthijs/mms-tts-kor"),
-}
-
-tokenizers = {
- "English": VitsTokenizer.from_pretrained("Matthijs/mms-tts-eng"),
- "German": VitsTokenizer.from_pretrained("Matthijs/mms-tts-deu"),
- "Korean": VitsTokenizer.from_pretrained("Matthijs/mms-tts-kor"),
-}
-
-
-# For certain checkpoints, the text needs to be romanized.
-# MMS-TTS uses uromanize.pl for this from https://github.com/isi-nlp/uroman
-# This needs to be installed in the folder "uroman"
-def uromanize(text, uroman_pl):
- iso = "xxx"
- with tempfile.NamedTemporaryFile() as tf, tempfile.NamedTemporaryFile() as tf2:
- with open(tf.name, "w") as f:
- f.write("\n".join([text]))
- cmd = f"perl {uroman_pl} -l {iso} < {tf.name} > {tf2.name}"
- os.system(cmd)
- outtexts = []
- with open(tf2.name) as f:
- for line in f:
- line = re.sub(r"\s+", " ", line).strip()
- outtexts.append(line)
- outtext = outtexts[0]
- return outtext
-
-
-def predict(text, language=None):
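- # Korean text is romanized with uroman before tokenization; other languages are tokenized directly.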
- if len(text.strip()) == 0:
- # Empty input: return a silent, zero-length waveform and echo the text back.
- return (16000, np.zeros(0).astype(np.int16)), text
-
- if language == "Korean":
- uroman_pl = os.path.join("uroman", "bin", "uroman.pl")
- text = uromanize(text, uroman_pl)
-
- tokenizer = tokenizers[language]
- inputs = tokenizer(text, return_tensors="pt")
- input_ids = inputs["input_ids"]
-
- if language != "Korean":
- text = tokenizer.batch_decode(input_ids)[0]
-
- model = models[language]
- with torch.no_grad():
- outputs = model(input_ids)
-
- speech = outputs.audio[0]
- speech = (speech.numpy() * 32767).astype(np.int16)
- return (16000, speech), text
-
-
-title = "MMS-TTS speech synthesis"
-
-description = """
-Facebook's [Massively Multilingual Speech](https://arxiv.org/abs/2305.13516) project aims to provide
-speech technology across a diverse range of languages. The MMS-TTS project contains a collection of
-over 1000 text-to-speech (TTS) models.
-
-This demo shows how to use MMS-TTS using 🤗 Transformers. Since MMS-TTS is based on the VITS
-model, this code can also be used to run VITS checkpoints.
-For a full list of checkpoints, [click here](https://huggingface.co/models?filter=vits).
-
-As the model performs random sampling, the generated speech is slightly different each time.
-The voice may also vary between runs, or sometimes even in the same sentence.
-(Note that 🤗 Transformers also supports multispeaker VITS checkpoints but the MMS-TTS checkpoints
-are not conditioned on a speaker ID.)
-"""
-
-article = """
-
-@article{pratap2023mms,
- title={Scaling Speech Technology to 1,000+ Languages},
- author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli},
- journal={arXiv},
- year={2023}
-}
-