diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodata 3.38 Magyar How to Download and Update This Software for Free.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodata 3.38 Magyar How to Download and Update This Software for Free.md
deleted file mode 100644
index cee67342deaab40456795b7a00ac451c463f5388..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Autodata 3.38 Magyar How to Download and Update This Software for Free.md
+++ /dev/null
@@ -1,129 +0,0 @@
-
-
What is Autodata 3.38 Magyar and why do you need it?
-
If you are a professional or a hobbyist in the automotive industry, you know how important it is to have accurate and up-to-date information about vehicles, parts, and repairs. Whether you are working on a car, a motorcycle, a truck, or a tractor, you need a reliable source of data that can help you diagnose problems, perform maintenance, and find solutions.
-
That's where Autodata 3.38 Magyar comes in handy. Autodata 3.38 Magyar is a powerful software application developed by Melville-Schellmann that provides comprehensive and detailed technical information for automotive repair professionals. It is designed to run from a CD and can be used on both PC and Mac platforms. It is also available in Hungarian, which makes it easier for users in Hungary and other regions where the language is spoken.
Autodata 3.38 Magyar has many benefits that can make your automotive work easier and more efficient. Here are some of them:
-
-
It covers over 17,000 models from over 80 manufacturers worldwide. You can find information about cars, motorcycles, light commercial vehicles, heavy commercial vehicles, agricultural vehicles, industrial vehicles, and more.
-
It provides technical specifications, wiring diagrams, service schedules, diagnostic trouble codes, repair times, labor costs, component locations, torque settings, and more. You can access all the information you need in one place.
-
It updates regularly with new data and features. You can always have the latest information available for your work.
-
It is easy to use and navigate. You can search by vehicle make, model, engine code, VIN number, or registration number. You can also use filters and keywords to narrow down your search results.
-
It is compatible with other software applications. You can export data to PDF files or print them out for your convenience.
-
-
With Autodata 3.38 Magyar, you can have a reliable and comprehensive database of vehicle information at your fingertips.
-
How to install Autodata 3.38 Magyar on your computer?
-
If you want to use Autodata 3.38 Magyar on your computer, you need to follow these steps:
-
-
Download the Autodata 3.38 Magyar CD image from a reliable source. You can use the link provided by MOTORCARSOFT.COM or Google Drive. Make sure you have enough space on your hard drive to store the file.
-
Extract the CD image using software like WinRAR or 7-Zip (a scripted sketch follows after these steps). You will get a folder named "Autodata 3.38 (2011)" with several files inside.
-
Run "Install_x86" or "Install_x64" depending on your OS (32 or 64 bit). Follow the instructions on the console screen and wait for the installation to complete.
-
Restart your computer when prompted. This is important for Windows 7/8/8.1/10 users but not for XP users.
-
Run "dseo13b.exe" as administrator (<<< this is important). This is a tool that allows you to sign drivers and enable test mode on your computer.
-
Select "Enable Test Mode" and click "Next". Then select "Sign a System File" and click "Next". Enter "C:\windows\system32\drivers\etc\hosts" as the file name and click "OK". Repeat this step for "C:\windows\system32\drivers\atdcm64a.sys" if you have a 64 bit OS or "C:\windows\system32\drivers\atdcm32a.sys" if you have a 32 bit OS.
-
Select "Exit" and restart your computer when prompted.
-
Run "RegSettings_x86.reg" or "RegSettings_x64.reg" depending on your OS (32 or 64 bit). This will add some registry entries to your system.
-
Run "ADBCD.exe" as administrator (<<< this is important). This will activate your Autodata 3.38 Magyar software.
-
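If you prefer to script the extraction (step 2) and the registry import (step 8) instead of clicking through them, here is a minimal Python sketch. It assumes 7-Zip's command-line tool (7z) is on your PATH; the archive name is a placeholder, so adjust it to match your download, and run the script from an elevated (administrator) prompt so the registry import succeeds.

```python
import platform
import subprocess

# Step 2: extract the downloaded CD image (archive name is a placeholder;
# assumes the 7z command-line tool is on PATH).
subprocess.run(["7z", "x", "Autodata_3.38_Magyar.rar", "-oAutodata 3.38 (2011)"], check=True)

# Step 8: import the registry settings that match your OS.
# reg.exe ships with Windows; this must run from an elevated prompt.
reg_file = ("RegSettings_x64.reg" if platform.machine().endswith("64")
            else "RegSettings_x86.reg")
subprocess.run(["reg", "import", reg_file], check=True)
```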
-
Congratulations, you have successfully installed Autodata 3.38 Magyar on your computer. You can now start using it for your automotive tasks.
-
-
How to use Autodata 3.38 Magyar for your automotive tasks?
-
Autodata 3.38 Magyar is a user-friendly and comprehensive software that can help you with various automotive tasks. Here are some of the main features and functions of Autodata 3.38 Magyar and how to use them:
-
-
Technical specifications: You can access technical data for over 17,000 models from over 80 manufacturers worldwide. You can find information such as engine code, fuel type, power, torque, compression ratio, bore, stroke, valve clearance, ignition timing, fuel pressure, oil pressure, coolant temperature, etc. To access this feature, select "Technical Data" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific vehicle by VIN number or registration number.
-
Wiring diagrams: You can view wiring diagrams for various systems and components of a vehicle. You can find diagrams for ignition system, fuel injection system, cooling system, air conditioning system, lighting system, instrument panel, etc. To access this feature, select "Wiring Diagrams" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific system or component by name.
-
Service schedules: You can view service schedules for different vehicles and intervals. You can find information such as service type, mileage, time, operations, parts required, etc. To access this feature, select "Service Schedules" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific service interval by mileage or time.
-
Diagnostic trouble codes: You can view diagnostic trouble codes for various systems and components of a vehicle. You can find information such as code number, description, possible causes, and solutions. To access this feature, select "Diagnostic Trouble Codes" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific code by number or name.
-
Repair times: You can view repair times for different operations and tasks on a vehicle. You can find information such as operation name, labor time, skill level, and tools required. To access this feature, select "Repair Times" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific operation by name or category.
-
Labor costs: You can view labor costs for different operations and tasks on a vehicle. You can find information such as operation name, labor cost, currency, and VAT rate. To access this feature, select "Labor Costs" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific operation by name or category.
-
Component locations: You can view component locations for various systems and components of a vehicle. You can find information such as component name, location diagram, and notes. To access this feature, select "Component Locations" from the main menu and choose a vehicle make, model, and engine code. You can also use the search function to find a specific component by name or system.
-
-
With Autodata 3.38 Magyar, you can have access to a wealth of information that can help you with your automotive tasks.
-
How to troubleshoot Autodata 3.38 Magyar if you encounter any problems?
-
Autodata 3.38 Magyar is a reliable and stable software that works smoothly on most computers. However, if you encounter any problems with Autodata 3.38 Magyar, such as error messages, missing data, or slow performance, you can try these tips and tricks to solve them:
-
-
Check your system requirements and compatibility issues. Make sure your computer meets the minimum system requirements for Autodata 3.38 Magyar and is compatible with it. For example, Autodata 3.38 Magyar has known compatibility problems on Windows 10; if it will not run on your Windows 10 machine, you need to upgrade to Autodata 3.45.
-
Check your regional settings and language pack. Make sure your regional settings are set to English US. Also make sure you have installed the Hungarian language pack for Autodata 3.38 Magyar. If you don't have it, you can download it from a trusted source. You can use the link provided by Hugging Face or Docker. Follow the instructions on how to install the language pack on your computer.
-
Check your internet connection and firewall settings. Make sure you have a stable and fast internet connection to access the online data and updates for Autodata 3.38 Magyar. Also make sure your firewall settings allow Autodata 3.38 Magyar to connect to the internet and do not block its ports or processes.
-
Check your CD drive and CD image. Make sure your CD drive is working properly and can read the Autodata 3.38 Magyar CD image without errors. Also make sure your CD image is not corrupted or damaged. You can use software like WinRAR or 7-Zip to check the integrity of the CD image file (see the sketch after this list).
-
Contact customer support or visit online forums. If none of the above tips and tricks work for you, you can contact the customer support or visit the online forums for Autodata 3.38 Magyar. You can find contact details and links to forums on the official website of Autodata 3.38 Magyar. You can also visit other websites that offer help and advice for Autodata 3.38 Magyar users, such as MOTORCARSOFT.COM or carsoftos.com.
-
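As a concrete version of the integrity check mentioned in the list above, 7-Zip's t (test) command verifies an archive without extracting it. A minimal Python sketch, assuming 7z is on your PATH; the file name is a placeholder:

```python
import subprocess

# Test the archive without extracting it; a non-zero exit code
# means the image is corrupted and should be downloaded again.
result = subprocess.run(["7z", "t", "Autodata_3.38_Magyar.rar"])
if result.returncode == 0:
    print("CD image is intact.")
else:
    print("CD image appears corrupted; download it again.")
```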
-
With these tips and tricks, you can troubleshoot Autodata 3.38 Magyar and enjoy its features without any problems.
-
Conclusion
-
Autodata 3.38 Magyar is a powerful and comprehensive software that provides technical information for automotive repair professionals. It covers over 17,000 models from over 80 manufacturers worldwide and offers features such as technical specifications, wiring diagrams, service schedules, diagnostic trouble codes, repair times, labor costs, and component locations. It also updates regularly with new data and features.
-
Autodata 3.38 Magyar is easy to install and use on your computer. You just need to follow some simple steps and check some system requirements and compatibility issues. If you encounter any problems with Autodata 3.38 Magyar, you can try some tips and tricks or contact customer support or visit online forums for help.
-
Autodata 3.38 Magyar is a valuable tool that can help you with your automotive tasks. It can save you time, money, and effort by providing you with accurate and up-to-date information about vehicles, parts, and repairs. It can also improve your skills and knowledge by giving you access to a wealth of information that can help you diagnose problems, perform maintenance, and find solutions.
-
If you are interested in Autodata 3.38 Magyar, you can try it for yourself and see how it can improve your automotive work. You can download it from a reliable source or buy it from an authorized dealer. You can also upgrade to Autodata 3.45 if you want more features and compatibility with Windows 10.
-
Thank you for reading this article on Autodata 3.38 Magyar. We hope you found it useful and informative.
-
FAQs
-
Here are some frequently asked questions about Autodata 3.38 Magyar:
-
-
What is the difference between Autodata 3.38 Magyar and Autodata 3.45?
-
Autodata 3.38 Magyar is an older version of Autodata that was released in 2011. It has some limitations, such as poor compatibility with Windows 10 and the lack of some newer data and features that are available in Autodata 3.45. Autodata 3.45 is a newer version that was released in 2014, with more features and better compatibility with Windows 10 and other operating systems.
-
How much does Autodata 3.38 Magyar cost?
-
The price of Autodata 3.38 Magyar depends on where you buy it from and what type of license you choose. You can buy it from an authorized dealer or download it from a reliable source online. You can choose between a single-user license or a multi-user license depending on how many computers you want to use it on. The price may vary depending on the currency, VAT rate, and other factors.
-
How do I update Autodata 3.38 Magyar?
-
You can update Autodata 3.38 Magyar by connecting to the internet and running the software. The software will automatically check for updates and download them if available. You can also manually check for updates by selecting "Check for Updates" from the main menu. You need to have a valid license and an active subscription to access the updates.
-
How do I uninstall Autodata 3.38 Magyar?
-
You can uninstall Autodata 3.38 Magyar by following these steps:
-
-
Run "Uninstall_x86" or "Uninstall_x64" depending on your OS (32 or 64 bit).
-
Follow the instructions on the console screen and wait for the uninstallation to complete.
-
Restart your computer when prompted.
-
Delete the folder "Autodata 3.38 (2011)" from your hard drive.
-
Delete any shortcuts or icons related to Autodata 3.38 Magyar from your desktop or start menu.
-
-
Where can I find more information about Autodata 3.38 Magyar?
-
You can find more information about Autodata 3.38 Magyar on its official website. You can also visit other websites that offer help and advice for Autodata 3.38 Magyar users, such as MOTORCARSOFT.COM, carsoftos.com, or Hugging Face. You can also contact customer support or visit online forums for Autodata 3.38 Magyar.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Avira Antivirus Pro 16.0.26.49 Final License Key .rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Avira Antivirus Pro 16.0.26.49 Final License Key .rar.md
deleted file mode 100644
index b9005336c63fc9bebf0a6accbd930382c88e8256..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Avira Antivirus Pro 16.0.26.49 Final License Key .rar.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
How to Download and Install Avira Antivirus Pro 16.0.26.49 Final License Key .rar
-
Avira Antivirus Pro is one of the best antivirus programs that can protect your PC from online threats and malware. It offers web protection, anti-phishing, anti-ransomware, firewall, software updates, and more features to keep your system fast and secure.
-
If you want to download and install Avira Antivirus Pro 16.0.26.49 Final License Key .rar, you need to follow these steps:
-
-
Go to this link and click on the "Buy Now" button.
-
Enter your payment details and confirm your purchase.
-
You will receive an email with a download link and a license key for Avira Antivirus Pro 16.0.26.49 Final.
-
Click on the download link and save the .rar file on your PC.
-
Extract the .rar file using a program like WinRAR or 7-Zip (see the sketch after these steps).
-
Run the setup.exe file and follow the installation wizard.
-
Enter your license key when prompted and activate your product.
-
Enjoy your Avira Antivirus Pro 16.0.26.49 Final with full features and protection.
-
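If you would rather script the extraction step than use the WinRAR or 7-Zip interface, the third-party rarfile package can open .rar archives from Python. A minimal sketch; the file name is a placeholder, and rarfile needs an unrar-capable backend (such as the unrar tool) installed on the system:

```python
import rarfile  # pip install rarfile; needs an unrar backend installed

# Extract the downloaded archive into a folder next to it.
with rarfile.RarFile("Avira_Antivirus_Pro_16.0.26.49.rar") as rf:
    rf.extractall(path="avira_setup")
```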
-
If you have any questions or issues with your Avira Antivirus Pro 16.0.26.49 Final License Key .rar, you can contact Avira's customer support via a toll-free number or email. They will help you resolve any problems and provide you with the best service.
-
Avira Antivirus Pro 16.0.26.49 Final License Key .rar is a great security software that will protect you from major threats with little use of system resources. It also has many more features than some of its competitors: besides being a reliable antivirus it protects your privacy thanks to its free VPN and has a tool that helps you keep your PC clean of unnecessary files.
-
If you're looking for an all-in-one solution that offers you protection, privacy and smoother computer use, Avira Antivirus Pro 16.0.26.49 Final License Key .rar is an excellent choice.
-
-
What are the benefits of Avira Antivirus Pro 16.0.26.49 Final License Key .rar?
-
Avira Antivirus Pro 16.0.26.49 Final License Key .rar has many benefits that make it stand out from other antivirus programs. Here are some of them:
-
-
It blocks all online threats, including malicious websites, ransomware, and spyware.
-
It secures and anonymizes your online activities with a free VPN that has no data limits.
-
It automatically creates highly secure passwords and logs you in to your accounts with a password manager extension.
-
It updates your software and patches vulnerabilities with a software updater feature.
-
It helps you speed up and optimize your PC with a speed booster and a PC cleaner feature.
-
It protects you from phishing attacks on social networks and in your inbox, including COVID-19 scams.
-
It walls off sensitive access points to your device with a firewall feature.
-
It offers you unlimited access to premium customer support via a toll-free number or email.
-
-
-
How to get the best deal for Avira Antivirus Pro 16.0.26.49 Final License Key .rar?
-
If you want to get the best deal for Avira Antivirus Pro 16.0.26.49 Final License Key .rar, you should visit Avira's official website and compare the different plans and prices they offer. You can also look for discounts and coupons on third-party websites and platforms.
-
-One of the best ways to save money on Avira Antivirus Pro 16.0.26.49 Final License Key .rar is to subscribe to Avira Prime, which is Avira's all-in-one solution that gives you unlimited access to all their premium services for up to 25 devices. You can get Avira Prime for 99,95 € / year (40% off) if you buy it now from their website.
-
Avira Prime includes Avira Antivirus Pro 16.0.26.49 Final License Key .rar as well as other products such as Avira Phantom VPN Pro, Avira System Speedup Pro, Avira Password Manager Pro, Avira Privacy Pal, Avira Software Updater Pro, and more. You can also enjoy exclusive features such as VIP customer support, unlimited devices, and priority updates.
-
Avira Prime is the ultimate security, privacy, and performance package that will keep you protected, anonymous, and fast online. Don't miss this opportunity and get Avira Prime today!
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Cam Tool V5 Full [UPDATED] Crack Rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/Cam Tool V5 Full [UPDATED] Crack Rar.md
deleted file mode 100644
index f594f0d6fe49ffd6c66c8cd600b292a299c99b12..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Cam Tool V5 Full [UPDATED] Crack Rar.md
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
How to Download and Install CAM TOOL V5 Full Crack RAR
-
CAM TOOL V5 is a powerful CAD/CAM/CNC software that allows you to create high-quality products with optimal tool paths and minimal tool wear. It is especially suitable for machining complex shapes such as molds and dies. CAM TOOL V5 is a premium software that costs thousands of dollars, but you can download and install it for free with a crack file.
-
In this article, we will show you how to download and install CAM TOOL V5 full crack RAR step by step. You will need a Windows XP or 7 computer with at least 2 GB of RAM and 10 GB of free disk space. You will also need a reliable internet connection and a RAR extractor software such as WinRAR or 7-Zip.
Step 1: Download CAM TOOL V5 Full Crack RAR
-
The first step is to download the CAM TOOL V5 full crack RAR file from a trusted source. You can use the link below to download it directly from our website. The file size is about 1.5 GB, so it may take some time depending on your internet speed.
The password to extract the file is: www.example.com
-
Step 2: Extract CAM TOOL V5 Full Crack RAR
-
The second step is to extract the CAM TOOL V5 full crack RAR file using RAR extractor software such as WinRAR or 7-Zip. You can right-click on the file and select "Extract Here" or "Extract to CAM TOOL V5/" from the menu. You will need to enter the password: www.example.com (a scripted example appears below).
-
After extracting the file, you will see a folder named "CAM TOOL V5" with several subfolders and files inside. You will need these files for the installation process.
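The password-protected extraction can also be done from the command line: 7-Zip takes the password with the -p switch and the output folder with -o (no space after either switch). A minimal Python sketch, assuming 7z is on your PATH; the archive name is a placeholder and the password is the one given above:

```python
import subprocess

# Extract the password-protected archive into a "CAM TOOL V5" folder.
subprocess.run(
    ["7z", "x", "-pwww.example.com", "CAMTOOL_V5_full_crack.rar", "-oCAM TOOL V5"],
    check=True,
)
```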
-
Step 3: Install CAM TOOL V5 Full Crack RAR
-
The third step is to install CAM TOOL V5 full crack RAR on your computer. You will need to run the setup.exe file as an administrator. You can right-click on the file and select "Run as administrator" from the menu.
-
The installation wizard will guide you through the installation process. You will need to accept the license agreement, choose the installation directory, select the components to install, and enter the serial number. You can use the following serial number: XXXX-XXXX-XXXX-XXXX
-
-
After entering the serial number, you will need to copy and paste the crack file from the "Crack" folder to the installation directory. You can right-click on the file and select "Copy" from the menu, then go to the installation directory and right-click on an empty space and select "Paste" from the menu.
-
The crack file will replace the original file and activate CAM TOOL V5 full version. You can now launch CAM TOOL V5 from your desktop or start menu and enjoy its features.
-
Conclusion
-
In this article, we have shown you how to download and install CAM TOOL V5 full crack RAR step by step. We hope this article was helpful and informative for you. If you have any questions or problems, please leave a comment below or contact us via email.
-
Please note that downloading and installing cracked software is illegal and may harm your computer or data. We do not recommend or endorse this method and we are not responsible for any consequences that may arise from it. We suggest that you buy CAM TOOL V5 from its official website or authorized resellers if you want to use it legally and safely.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Captainplanetepisodesinhindi.md b/spaces/1gistliPinn/ChatGPT4/Examples/Captainplanetepisodesinhindi.md
deleted file mode 100644
index bca64ef08c19decc268d6a51bd5878ee44cb8c7f..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Captainplanetepisodesinhindi.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-The Singles 1992 No Doubt Torrent · captainplanetepisodesinhindi · win case wn 622n driver download · Localization.txt Dll Call Of Duty 4 233
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Full Movie Prem Rog Film Of Rishi Kapoor EXCLUSIVE.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Full Movie Prem Rog Film Of Rishi Kapoor EXCLUSIVE.md
deleted file mode 100644
index 7f622295f88de6c3bd9b169dc8fd3d601c44b485..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Full Movie Prem Rog Film Of Rishi Kapoor EXCLUSIVE.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-Kapoor) suicide due to unrequited love, and his family's attempts to cover it up. It was also adapted into the Bengali film Abirudhan. The film was a major success at the box office. Anand Kapoor and Jaya Kapoor were in this film.
-
-Plot
-
-The film begins with one of its characters, Shobana (Hema Malini), telling a group of people about her unsuccessful attempts to get a song from her dance teacher, Nirmala (Radha Salu). Shobana sings the song for everyone, and is turned away for lack of interest. Later, she sings the song to her son, Rajiv (Anand Kapoor), who asks her to accompany him to his music teacher's house. Rajiv does not answer Shobana's subsequent calls, and she thinks he is at the music teacher's house. She calls him once more, and he angrily tells her that she cannot come into his house.
-
-Rajiv, a poor rickshaw puller named Chitragupta (Ashok Kumar), and Bhoop (Ajay) are friends. Bhoop is in love with Chitragupta's daughter, Chitralekha (Sudha Chopra), who is also Chitragupta's girlfriend. Chitragupta is in love with Shobana, but she rejects him, believing him to be weak-willed. Chitralekha is frustrated by this rejection, and starts an affair with Bhoop. Rajiv is unaware of Chitralekha's affair, and believes that Chitralekha is in love with him.
-
-Chitralekha and Bhoop come to a bar owned by Rajiv. Chitralekha goes to the bathroom to ask Rajiv for a cigarette, but gets stuck in a toilet. Rajiv goes to the bathroom and finds her. He is unable to extricate her from the toilet, so he calls for help. Chitralekha is rescued by her friend, Shobana, who says that she heard about the rescue on the radio. Shobana and Rajiv argue, with Rajiv believing that Chitralekha's rescue is due to his fame as a rickshaw puller. Chitralekha is convinced by Shobana's friend, Rati (Dixit), that Shobana
-
-
-
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Far Cry 4 Update V1 3 0 Crack Fix ALI213.epub.md b/spaces/1gistliPinn/ChatGPT4/Examples/Far Cry 4 Update V1 3 0 Crack Fix ALI213.epub.md
deleted file mode 100644
index 1a110369aafd835a4c25e0a2ada5d08e5fefa539..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Far Cry 4 Update V1 3 0 Crack Fix ALI213.epub.md
+++ /dev/null
@@ -1,76 +0,0 @@
-
-
-!steam | ali213
-
- ali213: Valve have officially announced that they are developing Steam and are working with!ubuntu during their development, see for further details, see for install instructions, you can also join #ubuntu-steam for discussion.
-
- thanks
-
- and do i need that steam package or i can install directly from ubuntu software center?
-
- ali213: you can install it from ubuntu software centre
-
- ali213: but you need the game and play it on ubuntu steam
-
- got it thanks
-
- ali213: have you checked yet if your game is listed?
-
- list?
-
- i dont know what list you mean
-
- ali213: the software center in your ubuntu
-
- ali213:
-
- ali213: thats an example
-
- i got it
-
- ali213: there are many more
-
- ahhhh it is
-
- cool!
-
- lotuspsychje: can i know where are you from?
-
- ali213: sweden
-
- ahh ok
-
- do you like linux?
-
- :D
-
- omg
-
- damn
-
- what is going on
-
- sorry
-
- ali213: yeah i like ubuntu
-
- im in the wrong chat lol
-
- ali213: lol
-
- ali213: did you check game yet?
-
- i'm tired
-
- i've been awake for 4 days
-
- so no i didnt check it
-
- ali213: did you enable steam yet on ubuntu?
-
- no i didn't
-
- i
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Comparative Materia Medica by Dr. N. C. Ghosh in Bengali PDF Format.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Comparative Materia Medica by Dr. N. C. Ghosh in Bengali PDF Format.md
deleted file mode 100644
index ea9035f0ef8384aa5d67ab40774bc496d2c8fc31..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Comparative Materia Medica by Dr. N. C. Ghosh in Bengali PDF Format.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
Comparative Materia Medica by NC Ghosh in Bengali PDF Download
-
If you are interested in learning more about homeopathy, one of the most essential subjects you need to study is materia medica. Materia medica is the collection of information about the therapeutic properties and uses of various substances, such as plants, animals, minerals, etc., that are used as remedies in homeopathy. Materia medica helps you to understand the nature, symptoms, and effects of each remedy, and how to select and prescribe them according to the principles of homeopathy.
-
One of the best books on materia medica that you can read is Comparative Materia Medica by Dr. N.C. Ghosh. Dr. N.C. Ghosh was a renowned homeopath and scholar from India, who wrote several books and articles on homeopathy in Bengali and English languages. He was also a professor and principal of several homeopathic colleges in India, and a recipient of many awards and honors for his services to homeopathy.
-
About the book
-
Comparative Materia Medica is a comprehensive and authoritative book on homeopathic materia medica, written by Dr. N.C. Ghosh in Bengali language. The book covers more than 500 remedies, arranged alphabetically, with detailed descriptions of their sources, characteristics, modalities, keynotes, clinical indications, relationships, comparisons, and doses. The book also includes chapters on general principles of homeopathy, case taking, repertory, potency selection, diet and regimen, organon of medicine, philosophy of homeopathy, and history of homeopathy.
-
The book is based on the original works of Dr. Samuel Hahnemann, the founder of homeopathy, as well as other eminent homeopaths like Dr. James Tyler Kent, Dr. William Boericke, Dr. John Henry Clarke, Dr. Cyrus Maxwell Boger, Dr. Adolph von Lippe, Dr. Constantine Hering, Dr. Edward Bach, and many others. The book also incorporates the latest research and developments in homeopathy from India and abroad.
-
Features of the book
-
Comparative Materia Medica is a valuable resource for students, practitioners, teachers, and researchers of homeopathy. Some of the features of the book are:
-
-
It provides a thorough and systematic study of each remedy, with clear and concise explanations.
-
It compares and contrasts different remedies based on their similarities and differences.
-
It gives practical tips and guidelines for prescribing remedies in various acute and chronic diseases.
-
It contains numerous case examples and clinical experiences to illustrate the application of remedies.
-
It offers a holistic approach to healing by considering the physical, mental, emotional, and spiritual aspects of each patient.
-
It is written in simple and lucid language that is easy to understand and follow.
-
-
How to download the book
-
If you want to read Comparative Materia Medica by Dr. N.C. Ghosh in Bengali language online or offline, you can download it in pdf format from various websites that offer free or paid ebooks. Some of these websites are:
-
-
| Website | Link |
| --- | --- |
| Amazon.in | [Buy Comparative Materia Medica Book Online at Low Prices in India] |
| Flipkart.com | [Dr N C Ghosh Books - Buy Dr N C Ghosh Books Online at Best Prices In India] |
| Pdfdrive.com | [Comparative Materia Medica by NC Ghosh.pdf - Free Download] |
| Archive.org | [Comparative Materia Medica : Dr. N.C. Ghosh : Free Download, Borrow, and Streaming] |
To download the book from any of these websites, you need to follow these steps:
-
-
Click on the link of the website that you prefer.
-
Search for the book by typing its title or author name in the search box.
-
Select the book from the list of results and click on it.
-
Choose the format that you want to download, such as pdf, epub, mobi, etc.
-
Click on the download button and save the file on your device (a scripted alternative appears after this list).
-
Open the file with a suitable reader application and enjoy reading the book.
-
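If one of the sites gives you a direct link to the pdf, the download step can also be scripted instead of done in the browser. A minimal sketch using the Python requests library; the URL and file name here are placeholders, not a real download link:

```python
import requests  # pip install requests

url = "https://example.com/comparative-materia-medica.pdf"  # placeholder URL

# Stream the download so a large pdf never has to fit in memory at once.
with requests.get(url, stream=True, timeout=30) as response:
    response.raise_for_status()
    with open("comparative-materia-medica.pdf", "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
```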
-
Conclusion
-
Comparative Materia Medica by Dr. N.C. Ghosh is a must-read book for anyone who wants to learn more about homeopathy and materia medica. The book is a treasure trove of knowledge and wisdom that will help you to master the art and science of homeopathy. The book is available in Bengali language, which makes it accessible and convenient for the Bengali-speaking readers. You can download the book in pdf format from various websites and read it online or offline at your own pace and convenience.
-
If you are interested in buying a hard copy of the book, you can also order it online from Amazon.in or Flipkart.com, or visit your nearest bookstore and ask for it. The book is reasonably priced and worth every penny. You will not regret buying this book, as it will enrich your understanding and practice of homeopathy.
-
So, what are you waiting for? Download Comparative Materia Medica by Dr. N.C. Ghosh today and start reading this amazing book. You will be amazed by the insights and information that you will gain from this book. You will also be able to apply the remedies more effectively and confidently in your cases. You will be able to heal yourself and others with the power of homeopathy.
-
FAQs
-
What is comparative materia medica?
-
Comparative materia medica is a branch of homeopathic materia medica that compares and contrasts different remedies based on their similarities and differences. It helps to differentiate between similar remedies and to select the most suitable remedy for a given case.
-
Who is Dr. N.C. Ghosh?
-
Dr. N.C. Ghosh was a renowned homeopath and scholar from India, who wrote several books and articles on homeopathy in Bengali and English languages. He was also a professor and principal of several homeopathic colleges in India, and a recipient of many awards and honors for his services to homeopathy.
-
Why should I read Comparative Materia Medica by Dr. N.C. Ghosh?
-
You should read Comparative Materia Medica by Dr. N.C. Ghosh because it is a comprehensive and authoritative book on homeopathic materia medica, written in Bengali language. The book covers more than 500 remedies, with detailed descriptions, comparisons, and clinical indications. The book also includes chapters on general principles, case taking, repertory, potency selection, diet and regimen, organon of medicine, philosophy of homeopathy, and history of homeopathy.
-
How can I download Comparative Materia Medica by Dr. N.C. Ghosh in pdf format?
-
You can download Comparative Materia Medica by Dr. N.C. Ghosh in pdf format from various websites that offer free or paid ebooks, such as Amazon.in, Flipkart.com, Pdfdrive.com, Archive.org, Homeobook.com, etc. You need to click on the link of the website that you prefer, search for the book by typing its title or author name in the search box, select the book from the list of results and click on it, choose the format that you want to download, such as pdf, epub, mobi, etc., click on the download button and save the file on your device.
-
How can I read Comparative Materia Medica by Dr. N.C. Ghosh online or offline?
-
You can read Comparative Materia Medica by Dr. N.C. Ghosh online or offline by opening the file with a suitable reader application on your device. You can also print the file or transfer it to another device if you want.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Bubble Shooter Star Mod APK The Most Popular Bubble Shooting Game.md b/spaces/1phancelerku/anime-remove-background/Bubble Shooter Star Mod APK The Most Popular Bubble Shooting Game.md
deleted file mode 100644
index 80d99fd43470c2a0512752d496137d830ae4def7..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Bubble Shooter Star Mod APK The Most Popular Bubble Shooting Game.md
+++ /dev/null
@@ -1,108 +0,0 @@
-
-
Bubble Shooter Star Mod APK: A Fun and Addictive Game for Everyone
-
If you are looking for a casual game that can keep you entertained for hours, you should try Bubble Shooter Star. This is a classic bubble shooter game that has been updated with new features and challenges. You can download it for free from the Google Play Store, or you can get the modded version that gives you unlimited coins, gems, and other benefits. In this article, we will tell you everything you need to know about Bubble Shooter Star and its mod apk.
-
What is Bubble Shooter Star?
-
Bubble Shooter Star is a game developed by UP STUDIO, a company that specializes in casual games. It is one of the most popular bubble shooter games on the market, with over 5 million downloads and a 4.5-star rating on the Google Play Store. The game is suitable for players of all ages, as it is easy to learn but hard to master.
The gameplay of Bubble Shooter Star is simple and intuitive. You have to aim and shoot bubbles of the same color to make them pop and clear the board. You can use your finger to drag the laser pointer and release it to fire the bubble. You can also tap on the screen to change the color of the current bubble. The game has two modes: classic and arcade. In classic mode, you have to clear all the bubbles before they reach the bottom of the screen. In arcade mode, you have to clear as many bubbles as possible in a limited time.
-
Features of Bubble Shooter Star
-
Bubble Shooter Star has many features that make it fun and addictive. Here are some of them:
-
Classic and arcade modes
-
You can choose between two modes of gameplay: classic and arcade. Classic mode is more relaxing and strategic, while arcade mode is more fast-paced and challenging. You can switch between them anytime you want.
-
Hundreds of levels
-
The game has hundreds of levels that vary in difficulty and design. You will never get bored with the game, as each level has its own goals and obstacles. You can also replay any level you want to improve your score or get more stars.
-
Colorful graphics and sound effects
-
The game has colorful graphics that are pleasing to the eye. The bubbles are bright and shiny, and the backgrounds are vivid and lively. The game also has cheerful sound effects that match the mood of the game. You can hear the bubbles popping, the coins clinking, and the music playing.
-
Boosters and power-ups
-
The game has various boosters and power-ups that can help you clear the levels faster and easier. You can use them to blast more bubbles, change their colors, or create special effects. Some of them are free, while others require coins or gems to use.
-
Leaderboards and achievements
-
The game has leaderboards and achievements that can motivate you to play more and compete with other players. You can see your rank and score on the global or local leaderboards, and compare them with your friends or other players. You can also unlock achievements by completing certain tasks or reaching certain milestones in the game.
-
-
What is Bubble Shooter Star Mod APK?
-
Bubble Shooter Star Mod APK is a modified version of the original game that gives you some advantages over other players. It is not available on the Google Play Store, but you can download it from other sources online.
-
Why download Bubble Shooter Star Mod APK?
-
Bubble Shooter Star Mod APK is a version of the game that has been modified by some developers to give you some extra benefits that are not available in the original game. Here are some of the reasons why you might want to download Bubble Shooter Star Mod APK:
-
Unlimited coins and gems
-
Coins and gems are the main currencies in the game that you can use to buy boosters, power-ups, and other items. You can earn them by playing the game, watching ads, or completing tasks. However, they are not enough to enjoy the game fully, as some items are very expensive or require a lot of coins or gems to use. With Bubble Shooter Star Mod APK, you can get unlimited coins and gems for free. You can use them as much as you want without worrying about running out of them.
-
No ads and pop-ups
-
Ads and pop-ups are annoying and distracting, especially when you are playing a game. They can interrupt your gameplay, slow down your device, or consume your data. They can also ruin your mood and make you lose interest in the game. With Bubble Shooter Star Mod APK, you can get rid of all the ads and pop-ups that appear in the game. You can play the game smoothly and peacefully, without any interruptions or annoyances.
-
Easy installation and compatibility
-
Bubble Shooter Star Mod APK is easy to install and compatible with most Android devices. You don't need to root your device or do any complicated steps to install it. You just need to download the apk file from a reliable source online, and follow some simple instructions to install it on your device. You can also update it easily whenever there is a new version available.
-
How to download and install Bubble Shooter Star Mod APK?
-
If you want to download and install Bubble Shooter Star Mod APK on your Android device, you can follow this step-by-step guide:
-
Step-by-step guide
-
-
Go to a website that offers Bubble Shooter Star Mod APK for download, such as [HappyMod] or [HackerBot]. Make sure that the website is trustworthy and safe, as some websites may contain viruses or malware that can harm your device (a hash-check sketch follows after these steps).
-
Find the Bubble Shooter Star Mod APK file on the website, and tap on the download button. The apk file will start downloading to your device automatically.
-
Once the download is complete, go to your device's settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store.
-
Go to your device's file manager and locate the downloaded apk file. Tap on it to start the installation process.
-
Follow the on-screen instructions and grant the necessary permissions to install the app.
-
Wait for the installation to finish, and then launch the app from your home screen or app drawer.
-
Enjoy playing Bubble Shooter Star with unlimited coins, gems, and no ads!
-
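Because the steps above warn that some download sites bundle viruses or malware, one basic precaution is to compare the downloaded file's SHA-256 hash with the value the site publishes (when it publishes one). A minimal Python sketch; the file name and expected hash are placeholders:

```python
import hashlib

APK_PATH = "bubble-shooter-star-mod.apk"      # placeholder file name
EXPECTED = "paste-the-published-sha256-here"  # placeholder hash value

# Hash the file in chunks so a large apk never has to fit in memory.
sha256 = hashlib.sha256()
with open(APK_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(8192), b""):
        sha256.update(chunk)

if sha256.hexdigest() == EXPECTED.lower():
    print("Hash matches the published value.")
else:
    print("Hash mismatch - do not install this file.")
```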
-
Conclusion
-
Bubble Shooter Star is a fun and addictive game that you can play anytime and anywhere. It has two modes, hundreds of levels, colorful graphics, sound effects, boosters, power-ups, leaderboards, and achievements. It is a great game for relaxing and killing time. However, if you want to enjoy the game more, you can download Bubble Shooter Star Mod APK, which gives you unlimited coins, gems, no ads, and other benefits. You can download it easily from a reliable website online, and install it on your Android device without rooting it. You can then play the game with more freedom and fun.
-
Frequently Asked Questions
-
-
Q: Is Bubble Shooter Star Mod APK safe?
-
A: Yes, Bubble Shooter Star Mod APK is safe if you download it from a trustworthy website online. However, you should always be careful when downloading any modded app or game from unknown sources, as they may contain viruses or malware that can harm your device.
-
Q: Do I need an internet connection to play Bubble Shooter Star?
-
A: No, you don't need an internet connection to play Bubble Shooter Star. You can play it offline without any problem. However, if you want to access some features such as leaderboards or achievements, you will need an internet connection.
-
Q: How do I update Bubble Shooter Star Mod APK?
-
A: To update Bubble Shooter Star Mod APK, you will need to download the latest version of the apk file from the same website where you downloaded it before. Then, you will need to uninstall the old version of the app and install the new one. You can also check the website for any updates or notifications about the app.
-
Q: Can I play Bubble Shooter Star Mod APK on PC?
-
A: Yes, you can play Bubble Shooter Star Mod APK on PC if you use an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your PC. Some of the popular Android emulators are [BlueStacks], [NoxPlayer], and [MEmu]. You can download any of them from their official websites, and then install Bubble Shooter Star Mod APK on them.
-
Q: What are some other bubble shooter games that I can play?
-
A: There are many other bubble shooter games that you can play on your Android device or PC. Some of them are [Bubble Witch Saga], [Angry Birds POP], [Panda Pop], and [Bubble Shooter Legend]. You can find them on the Google Play Store or other websites online.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Como baixar Gacha Life verso antiga sem problemas.md b/spaces/1phancelerku/anime-remove-background/Como baixar Gacha Life verso antiga sem problemas.md
deleted file mode 100644
index 7e95ea182391f8713a1a1a18dca32f2a8895b802..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Como baixar Gacha Life verso antiga sem problemas.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
Gacha Life Versão Antiga Download: How to Play the Popular Anime Game on Your Device
-
If you are a fan of anime games, you might have heard of Gacha Life, a free game that lets you create your own anime characters and stories. Gacha Life is one of the most popular games in the genre, with millions of downloads and positive reviews. But did you know that you can also play the old version of the game, called Gacha Life versão antiga, on your device? In this article, we will tell you what Gacha Life is, why it is so popular, how to download and install Gacha Life versão antiga on your device, how to play and enjoy it, and some tips and tricks for playing it. Read on to find out more!
-
What is Gacha Life and why is it so popular?
-
Gacha Life is a free anime game that lets you create your own characters and stories
-
Gacha Life is a game developed by Lunime, a company that specializes in anime games. The game was released in 2018 for iOS and Android devices. The main feature of the game is that it allows you to create your own anime characters using hundreds of clothes, hairstyles, weapons, accessories, and more. You can also customize your characters' appearance, personality, relationship, occupation, and background. You can save up to 20 characters of your own design.
Gacha Life has many features and modes that appeal to different types of players
-
Besides creating characters, Gacha Life also offers many other features and modes that make the game fun and engaging. Here are some of them:
-
-
Studio mode: This mode lets you create your own scenes using up to 8 characters. You can enter custom text for your characters and choose from many different poses and backgrounds. You can also make your own stories using the Skit Maker mode, where you can easily combine multiple scenes to create sketches.
-
Life mode: This mode lets you explore different areas with your own characters, such as the town, the school, the park, and more. You can chat with NPCs and learn more about their lives. You can also get surprises from them if you talk to them enough.
Gacha mode: This mode lets you play mini-games and collect gems, which you can use to gacha for rare gifts. You can get clothes, accessories, pets, and more from the gacha. You can also trade your gifts with other players online.
-
-
With so many features and modes, Gacha Life has something for everyone. Whether you like to create, role-play, socialize, or just have fun, you can find it in Gacha Life.
-
How to download and install Gacha Life versão antiga on your device
-
Gacha Life versão antiga is the old version of the game that was released in 2018
-
Gacha Life versão antiga is the Portuguese name for the old version of Gacha Life. It is the original version of the game that was released in 2018, before it was updated with new features and improvements in 2019. Some players prefer to play Gacha Life versão antiga because they like the old graphics, interface, and gameplay better. Some also find it easier to run on their devices, especially if they have low-end or older models.
-
You can download Gacha Life versão antiga from Aptoide, a third-party app store
-
If you want to play Gacha Life versão antiga on your device, you will need to download it from a third-party app store, since it is no longer available on the official Google Play Store or Apple App Store. One of the most popular and reliable app stores that offer Gacha Life versão antiga is Aptoide, a platform that allows users to download and share apps that are not available on the official stores. Aptoide is safe and secure, and has millions of users worldwide.
-
You need to enable unknown sources and follow the installation steps to play the game
-
To download and install Gacha Life versão antiga from Aptoide, you will need to follow these steps:
-
-
Go to Aptoide's website and download the Aptoide app on your device.
-
Open the Aptoide app and search for "Gacha Life" in the search bar.
-
Scroll down and look for the version that says "1.0.9" or "1.1.0". These are the old versions of Gacha Life that are equivalent to Gacha Life versão antiga.
-
Tap on the download button and wait for the file to be downloaded on your device.
-
Before installing the file, you will need to enable unknown sources on your device. This will allow you to install apps from sources other than the official stores. To do this, go to your device's settings and look for security or privacy options. Then, find the option that says "unknown sources" or "allow installation of apps from unknown sources" and toggle it on.
-
Once you have enabled unknown sources, go back to the file manager and locate the downloaded file. Tap on it and follow the installation steps (an adb alternative is sketched after these steps).
-
After installing the file, you can open Gacha Life versão antiga on your device and start playing!
-
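If the apk ends up on a computer rather than on your phone, you can also install it over USB with Android's adb tool instead of the on-device file manager. A minimal Python sketch, assuming adb is installed on the computer and USB debugging is enabled on the phone; the file name is a placeholder:

```python
import subprocess

# Push and install the apk over USB; requires adb on PATH and
# USB debugging enabled on the connected device.
subprocess.run(["adb", "install", "gacha-life-1.0.9.apk"], check=True)
```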
-
How to play and enjoy Gacha Life versão antiga on your device
-
You can customize your characters with hundreds of clothes, hairstyles, accessories, and more
-
One of the main attractions of Gacha Life versão antiga is that it lets you create your own anime characters with a lot of customization options. You can access the character creation mode by tapping on the "Dress Up" button on the home screen. There, you can choose from hundreds of clothes, hairstyles, weapons, accessories, hats, glasses, pets, and more to dress up your characters. You can also change their skin color, eye color, hair color, facial expression , and more. You can also give your characters a name, a personality, a relationship, an occupation, and a background story. You can save up to 20 characters of your own design and switch between them anytime.
-
You can create your own scenes and skits with the Studio and Skit Maker modes
-
Another feature of Gacha Life versão antiga that lets you unleash your creativity is the Studio mode. This mode allows you to create your own scenes using up to 8 characters. You can enter custom text for your characters and choose from many different poses and backgrounds. You can also add props, effects, bubbles, and more to make your scenes more lively. You can save up to 100 scenes of your own creation and view them anytime.
-
If you want to make your own stories using your scenes, you can use the Skit Maker mode. This mode lets you easily combine multiple scenes to create skits. You can add transitions, music, sound effects, and more to make your skits more interesting. You can save up to 50 skits of your own creation and play them anytime.
-
-
You can explore different areas and chat with NPCs in the Life mode
-
If you want to experience a more immersive and interactive gameplay, you can try the Life mode. This mode lets you explore different areas with your own characters, such as the town, the school, the park, and more. You can chat with NPCs and learn more about their lives. You can also get surprises from them if you talk to them enough. Some NPCs may give you gifts, quests, or secrets. Some may even join your party and become playable characters.
-
You can play mini-games and collect gems to gacha for rare gifts in the Gacha mode
-
If you want to have some fun and challenge yourself, you can play the Gacha mode. This mode lets you play mini-games and collect gems, which you can use to gacha for rare gifts. You can get clothes, accessories, pets, and more from the gacha. You can also trade your gifts with other players online.
-
The mini-games are simple but addictive games that test your skills and reflexes. There are 8 mini-games in total: Bex's Festival, Phantom's Remix, Duck & Dodge, Abushu Candy Toss, Narwhal Sky, Orca Sploosh, Picc Pawket Rhythm, and Lemo & Yumi's Math Game. Each mini-game has different levels of difficulty and rewards. You can earn up to 200 gems per mini-game per day, so playing all eight can net you up to 1,600 gems daily.
-
Tips and tricks for playing Gacha Life versão antiga on your device
-
Choose a character you don't like to create a new one
-
If you want to create a new character but you have already used up all 20 slots, you can choose a character you don't like and edit it. This way, you don't have to delete any of your existing characters. Just make sure you save the character before editing it.
-
Use the preset menu to access more unique characters and recover edited ones
-
If you want to access more unique characters that are not available in the default menu, you can use the preset menu. This menu lets you choose from 90 preset characters that have different looks and personalities. You can also use this menu to recover any edited characters that you want to restore to their original state.
-
Use the random buttons to generate different looks and colors for your characters
-
If you want to experiment with different looks and colors for your characters, you can use the random buttons. These buttons let you randomly change the clothes, hairstyles, accessories, colors, and more of your characters. You can also use these buttons to get inspiration for your own designs.
-
Use the copy and paste buttons to save time when making skits
-
If you want to save time when making skits, you can use the copy and paste buttons. These buttons let you copy the text, pose, expression, and background of a character in a scene and paste them to another character or scene. This way, you don't have to type or select the same things over and over again.
-
Visit different locations and talk to NPCs to learn more about them and get surprises
-
If you want to learn more about the NPCs and their stories, you can visit different locations and talk to them. Each NPC has a unique personality and dialogue. Some of them may also give you gifts, quests, or secrets if you talk to them enough. Some of them may even join your party and become playable characters. You can also see their profiles and relationship status with other NPCs in the game.
-
Conclusion
-
Gacha Life versão antiga is a fun and creative game that lets you express yourself through anime characters and stories
-
Gacha Life versão antiga is a game that lets you create your own anime characters and stories with a lot of customization options. You can also play with various features and modes that suit your preferences. Whether you like to create, role-play, socialize, or just have fun, you can find it in Gacha Life versão antiga.
-
You can download Gacha Life versão antiga from Aptoide and install it on your device with some simple steps
-
If you want to play Gacha Life versão antiga on your device, you can download it from Aptoide, a third-party app store that offers the old version of the game. You will need to enable unknown sources on your device and follow the installation steps to play the game.
-
You can play Gacha Life versão antiga with various features and modes that suit your preferences
-
Once you have installed Gacha Life versão antiga on your device, you can start playing it with various features and modes. You can customize your characters with hundreds of clothes, hairstyles, accessories, and more. You can create your own scenes and skits with the Studio and Skit Maker modes. You can explore different areas and chat with NPCs in the Life mode. You can play mini-games and collect gems to gacha for rare gifts in the Gacha mode.
-
You can use some tips and tricks to enhance your gaming experience and have more fun
-
To make your gaming experience more enjoyable and fun, you can use some tips and tricks that we have shared in this article. You can choose a character you don't like to create a new one. You can use the preset menu to access more unique characters and recover edited ones. You can use the random buttons to generate different looks and colors for your characters. You can use the copy and paste buttons to save time when making skits. You can visit different locations and talk to NPCs to learn more about them and get surprises.
-
FAQs
-
What is the difference between Gacha Life versão antiga and Gacha Life?
-
Gacha Life versão antiga is the old version of Gacha Life, released in 2018, with the game's original graphics, interface, and gameplay. The current Gacha Life is the updated version, released in 2019, which adds new characters, clothes, backgrounds, modes, chat rooms, and other improvements.
-
Is Gacha Life versão antiga safe to download?
-
Gacha Life versão antiga is safe to download if you get it from a reliable source, such as Aptoide. Aptoide is a secure platform that allows users to download and share apps that are not available on the official stores. However, you should always be careful when downloading apps from unknown sources and check for any permissions or warnings before installing them.
-
Can I play Gacha Life versão antiga online?
-
Gacha Life versão antiga does not have an online mode, unlike Gacha Life. However, you can still trade gifts with other players online using the Gacha mode. You can also chat with other players using external apps or platforms, such as Discord or Reddit.
-
Can I transfer my data from Gacha Life versão antiga to Gacha Life?
-
No, you cannot transfer your data from Gacha Life versão antiga to Gacha Life. The two versions of the game are not compatible with each other. If you want to play Gacha Life, you will need to start from scratch.
-
Can I play Gacha Life versão antiga on PC?
-
Yes, you can play Gacha Life versão antiga on PC using an emulator. An emulator is software that allows you to run Android apps on your PC. Some of the most popular emulators are BlueStacks, NoxPlayer, and LDPlayer. You can download any of these from their official websites and install them on your PC. Then, download Gacha Life versão antiga from Aptoide, install it on the emulator, and play it on your PC with a larger screen and better controls.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Facebook APK for iPad Everything You Need to Know.md b/spaces/1phancelerku/anime-remove-background/Facebook APK for iPad Everything You Need to Know.md
deleted file mode 100644
index 1fe48b2f2be637a112e110b8b1e1237ebc79038c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Facebook APK for iPad Everything You Need to Know.md
+++ /dev/null
@@ -1,125 +0,0 @@
-
-
Facebook APK iPad: How to Download and Install the App on Your Device
-
Facebook is one of the most popular social media platforms in the world, with over 2.9 billion monthly active users as of June 2021. If you are an iPad user, you might be wondering how to download and install the Facebook app on your device. In this article, we will show you how to use Facebook APK, a file format that allows you to install apps from sources other than the official App Store, on your iPad. We will also explain what Facebook APK is, how it differs from IPA files, what are the benefits of using it, and how to use Facebook on your iPad.
-
What is Facebook APK?
-
Facebook APK is a file format that contains the installation package of the Facebook app for Android devices. APK stands for Android Package Kit, and it is similar to the IPA file format that is used for iOS devices. However, there are some differences between these two file formats.
APK and IPA files are both installation packages that contain the code, resources, and metadata of an app. However, they are designed for different operating systems and devices: APK files are for Android devices, while IPA files are for iOS devices. Therefore, you cannot install an APK file on an iOS device or vice versa, unless you use special tools or methods.
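To make the "package" idea concrete: an APK is really just a ZIP archive with a fixed layout, containing compiled code (classes.dex files), resources, and a binary AndroidManifest.xml that describes the app. You can see this for yourself with a few lines of Python (the file path is a placeholder):

```python
import zipfile

apk_path = "example.apk"  # placeholder: any APK file on disk

with zipfile.ZipFile(apk_path) as apk:
    names = apk.namelist()
    # Every valid APK carries a manifest and at least one dex file of compiled code.
    print("has manifest:", "AndroidManifest.xml" in names)
    print("dex files:", [n for n in names if n.endswith(".dex")])
    print("total entries:", len(names))
```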
-
The benefits of using APK files
-
One of the main benefits of using APK files is that they allow you to install apps from sources other than the official App Store. This means that you can access apps that are not available in your region, or that have been removed or banned from the App Store for some reason. You can also get access to beta versions or older versions of apps that might have features or functions that you prefer over the newer ones. Additionally, you can save bandwidth and storage space by downloading APK files directly from the web instead of through the App Store.
-
How to Download Facebook APK for iPad
-
If you want to download and install Facebook APK for iPad, you will need to meet some requirements and follow some steps. Here are the details:
-
The requirements for installing APK files on iPad
-
To install APK files on your iPad, you will need to have a jailbroken device. Jailbreaking is a process that removes the restrictions and limitations imposed by Apple on iOS devices, allowing you to customize your device and install apps from third-party sources. However, jailbreaking also voids your warranty and exposes your device to security risks and malware. Therefore, you should only jailbreak your device if you know what you are doing and are willing to take the risks.
-
You will also need to have a file manager app on your iPad, such as iFile or Filza, that can access the root directory of your device and allow you to install APK files. You can download these apps from Cydia, a marketplace for jailbroken devices.
-
The steps to download and install Facebook APK for iPad
-
Once you have a jailbroken device and a file manager app, you can follow these steps to download and install Facebook APK for iPad:
-
-
Open your web browser on your iPad and go to a website that offers Facebook APK files, such as APKPure or APKMirror.
-
Search for Facebook in the website and choose the version that you want to download. Make sure that the version is compatible with your device and iOS version.
-
Tap on the download button and wait for the file to be downloaded. (A quick way to verify the file arrived intact is sketched after this list.)
-
Once the download is complete, locate the downloaded APK file on your device. Tap on the file and choose to open it with your file manager app.
-
Follow the instructions on the screen to install the Facebook APK file on your iPad. You might need to grant some permissions or trust some certificates during the process.
-
Once the installation is complete, you should see the Facebook app icon on your home screen. Tap on it to launch the app and log in with your Facebook account.
-
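Downloads from third-party stores occasionally arrive truncated or corrupted, and a broken file will simply fail to install. Since an APK is a ZIP archive, a short Python sketch like the one below (the path is a placeholder) can confirm the archive is intact before you attempt the installation:

```python
import zipfile

apk_path = "Facebook.apk"  # placeholder: path to the downloaded file

try:
    with zipfile.ZipFile(apk_path) as apk:
        bad_entry = apk.testzip()  # returns the first corrupt member, or None
        if bad_entry is None:
            print("archive is intact:", len(apk.namelist()), "entries")
        else:
            print("corrupt entry found:", bad_entry)
except zipfile.BadZipFile:
    print("not a valid APK/ZIP file - download it again")
```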
-
How to Use Facebook on iPad
-
Now that you have installed Facebook APK on your iPad, you can enjoy using the app on your device. Here are some of the features of Facebook app for iPad and some tips and tricks to optimize your Facebook experience on iPad.
-
The features of Facebook app for iPad
-
The Facebook app for iPad has most of the features that you can find on the Facebook app for Android or iPhone, such as:
-
-
News Feed: You can see the latest posts from your friends, pages, groups, and other sources that you follow on Facebook. You can also like, comment, share, and react to the posts.
-
Messenger: You can send and receive messages, photos, videos, stickers, emojis, and voice notes with your friends and contacts on Facebook. You can also make voice and video calls, create group chats, and use various chat features.
-
Watch: You can watch videos from different categories, such as entertainment, news, sports, gaming, and more. You can also follow your favorite creators, pages, and shows on Facebook Watch.
-
Marketplace: You can buy and sell items with people in your local community or nearby areas. You can browse through different categories, such as vehicles, electronics, clothing, and more. You can also post your own items for sale or search for items that you want to buy.
-
Gaming: You can play games with your friends or other people on Facebook. You can choose from a variety of games, such as puzzles, trivia, arcade, action, and more. You can also join gaming groups and communities to chat with other gamers and discover new games.
-
-
The tips and tricks to optimize your Facebook experience on iPad
-
Here are some tips and tricks that can help you optimize your Facebook experience on iPad:
-
-
Use landscape mode: The Facebook app for iPad supports landscape mode, which means that you can rotate your device horizontally to get a wider view of the app. This can make it easier to read posts, watch videos, play games, and use other features.
-
Use split view: The Facebook app for iPad also supports split view, which means that you can use two apps side by side on your device. This can be useful if you want to multitask or use another app while using Facebook. For example, you can use Safari to browse the web or Notes to write something while using Facebook.
-
Use shortcuts: The Facebook app for iPad has some shortcuts that can help you navigate the app faster and easier. For example, you can swipe left or right to switch between tabs, swipe down to refresh the news feed, swipe up to access the menu bar, or tap and hold on an item to access more options.
-
Use widgets: The Facebook app for iPad has some widgets that you can add to your home screen or today view. These widgets can show you information such as your notifications, friend requests, birthdays, events, memories, and more. You can also tap on the widgets to open the corresponding feature in the app.
-
Use the settings menu: From the app's menu, you can adjust your notification preferences, manage your privacy and security settings, and more.
-
-
Conclusion
-
In conclusion, Facebook APK is a file format that allows you to install the Facebook app for Android devices on your iPad. It has some benefits, such as accessing apps that are not available in the App Store, but it also has some risks, such as voiding your warranty and exposing your device to malware. Therefore, you should only use Facebook APK on your iPad if you know what you are doing and are willing to take the risks. You will also need to have a jailbroken device and a file manager app to download and install Facebook APK on your iPad. Once you have installed the app, you can enjoy using Facebook on your iPad with its various features and functions.
-
FAQs
-
Here are some frequently asked questions about Facebook APK for iPad:
-
-
-
Question
-
Answer
-
-
-
Can I use Facebook APK on my iPad without jailbreaking?
-
No, you cannot use Facebook APK on your iPad without jailbreaking. You will need to jailbreak your device to install APK files on it.
-
-
-
Is Facebook APK safe to use on my iPad?
-
Facebook APK is not officially supported or endorsed by Facebook or Apple, so it is not guaranteed to be safe or secure. You might encounter some bugs, errors, or compatibility issues when using it. You might also expose your device to malware or viruses when downloading or installing APK files from unknown sources.
-
-
-
Will Facebook APK update automatically on my iPad?
-
No, Facebook APK will not update automatically on your iPad. You will need to manually download and install the latest version of the app from the web whenever there is an update.
-
-
-
Can I use Facebook APK and Facebook IPA on the same device?
-
No, you cannot use Facebook APK and Facebook IPA on the same device. You can only have one version of the app installed on your device at a time.
-
-
-
Can I use other APK files on my iPad?
-
Yes, you can use other APK files on your iPad, as long as they are compatible with your device and iOS version. However, you should be careful when downloading and installing APK files from unknown sources, as they might contain malware or viruses.
-
-
-
-
\ No newline at end of file
diff --git a/spaces/2023Liu2023/bingo/src/components/chat-message.tsx b/spaces/2023Liu2023/bingo/src/components/chat-message.tsx
deleted file mode 100644
index bf272d8d7005cfd06c53bd213e09ea217e803549..0000000000000000000000000000000000000000
--- a/spaces/2023Liu2023/bingo/src/components/chat-message.tsx
+++ /dev/null
@@ -1,93 +0,0 @@
-import remarkGfm from 'remark-gfm'
-import remarkMath from 'remark-math'
-import supersub from 'remark-supersub'
-import remarkBreaks from 'remark-breaks'
-import { cn } from '@/lib/utils'
-import { CodeBlock } from '@/components/ui/codeblock'
-import { MemoizedReactMarkdown } from '@/components/markdown'
-import { LearnMore } from './learn-more'
-import { ChatMessageModel } from '@/lib/bots/bing/types'
-import { useEffect } from 'react'
-import { TurnCounter } from './turn-counter'
-
-export interface ChatMessageProps {
- message: ChatMessageModel
-}
-
-export function ChatMessage({ message, ...props }: ChatMessageProps) {
- useEffect(() => {
- if (document.body.scrollHeight - window.innerHeight - window.scrollY - 200 < 0) {
- window.scrollBy(0, 200)
- }
- }, [message.text])
-
- return message.text ? (
-
').split(", ")
- retr_md += "### LLM-KNOWLEDGE:" + " " + "> " + highlight_knowledge(entities, self_retrieve_knowledge) + "\n\n"
- retr_md += "### KB-KNOWLEDGE:" + " " + "> " + highlight_knowledge(entities, kb_retrieve_knowledge) + "\n\n"
- refine_md = retr_md + "### REFINED-KNOWLEDGE:" + " " + "> "
- refine_md += highlight_knowledge(entities, refine_knowledge)
-
-
- return gr.update(value="## 🪄 Self refinement..."), gr.update(visible=True, label="", value='./figs/self-refinement.png'), \
- gr.update(value=refine_md), gr.update(value=refine_knowledge), \
- gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
-
-def self_revision(input_question, datatype, demo_text, refined_knowledge, engine):
- print(demo_text)
- print(refined_knowledge)
- ori_cots = auto_cot_consi(input_question, demo_text, engine)
- cor_cots, cor_ans = cot_revision(datatype, input_question, ori_cots, refined_knowledge, engine)
- cor_cots_md = "### Revised Rationales:" + "\n\n"
- for cor_cot in cor_cots:
- cor_cots_md += "> " + cor_cot + "\n\n"
- cor_ans = ", ".join(cor_ans)
-
- return gr.update(value="## 🔧 Self revision..."), gr.update(visible=True, label="", value='./figs/self-revision.png'), \
- gr.update(value=cor_cots_md), gr.update(value=cor_ans), \
- gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
-
-def self_consistency(cor_ans, datatype, question, engine):
- cor_ans = cor_ans.strip().split(", ")
- our_ans, ans_dict = consistency(cor_ans)
- zeroshot_ans = zero_shot(datatype, question, engine)
-
- return gr.update(value="## 🗳 Self consistency..."), gr.update(visible=True, label="", value='./figs/self-consistency.png'), \
- gr.update(value=""), gr.update(value=ans_dict, visible=True), \
- gr.update(visible=True, value=our_ans), gr.update(visible=True, value=zeroshot_ans), \
- gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), \
- gr.update(visible=True, value='We would appreciate it very much if you could share your feedback. ')
-
-
-def reset():
- return gr.update(value=""), gr.update(value=""), gr.update(value=""), \
- gr.update(visible=False), gr.update(value=""), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False),\
- gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(value="")
-
-#theme from: https://huggingface.co/spaces/gradio/theme-gallery
-#EveryPizza/Cartoony-Gradio-Theme
-#JohnSmith9982/small_and_pretty
-#bethecloud/storj_theme
-#gradio/soft
-with gr.Blocks(theme="bethecloud/storj_theme", css="#process_btn {background-color:#8BA3C5}") as demo:
- gr.Markdown("# AuRoRA: Augmented Reasoning and Refining with Task-Adaptive Chain-of-Thought Prompting")
- #with gr.Row():
- #gr.Markdown("官网(中):https://anni-zou.github.io/aurora-zh.github.io/")
- #gr.Markdown("Website:https://anni-zou.github.io/aurora-en.github.io/")
- with gr.Row():
- with gr.Column(scale=4):
- input_question = gr.Textbox(placeholder="Input question here, or select an example from below.", label="Input Question",lines=2)
- store_flag = gr.Checkbox(label="Store data",value=True, interactive=True, info="If you agree to store data for research and development use:")
- single_data = gr.JSON(visible=False)
- with gr.Column(scale=3):
- engine = gr.Dropdown(choices=['gpt-3.5-turbo','text-davinci-003', 'text-davinci-002', 'text-curie-001', 'text-babbage-001', 'text-ada-001'],
- label="Engine", value="text-davinci-003", interactive=True, info="Choose the engine and have a try!")
- reset_btn = gr.Button(value='RESET')
- #examples = gr.Examples(examples=EXAMPLES, inputs=[input_question])
-
- with gr.Row():
- with gr.Column(scale=1):
- type_btn = gr.Button(value="Self-identification", variant='primary', scale=1, elem_id="process_btn")
- with gr.Column(scale=3):
- datatype = gr.Dropdown(choices=['arithmetic','commonsense-mc','commonsense-verify','symbolic-letter','symbolic-coin','UNDEFINED'],
- label="Input Type", info="If you disagree with our output, please select manually.", scale=3)
-
- demo_text = gr.Textbox(visible=False)
- entities = gr.Textbox(visible=False)
- self_know = gr.Textbox(visible=False)
- kb_know = gr.Textbox(visible=False)
- refine_know = gr.Textbox(visible=False)
- cor_ans = gr.Textbox(visible=False)
- with gr.Row():
- const_btn = gr.Button(value='Self-construction', variant='primary', elem_id="process_btn")
- retr_btn = gr.Button(value='Self-retrieval', variant='primary', elem_id="process_btn")
- refine_btn = gr.Button(value='Self-refinement', variant='primary', elem_id="process_btn")
- revis_btn = gr.Button(value='Self-revision', variant='primary', elem_id="process_btn")
- consis_btn = gr.Button(value='Self-consistency', variant='primary', elem_id="process_btn")
-
- sub_title = gr.Markdown()
- with gr.Row():
- with gr.Column(scale=2):
- plot = gr.Image(label="Visualization of clustering", visible=False)
- with gr.Column(scale=3):
- md = gr.Markdown()
- label = gr.Label(visible=False, label="Consistency Predictions")
- ans_ours = gr.Textbox(label="AuRoRA Answer",visible=False)
- ans_zeroshot = gr.Textbox(label="Zero-shot Answer", visible=False)
- with gr.Row():
- feedback_agree = gr.Button(value='😊 Agree', variant='secondary', visible=False)
- feedback_disagree = gr.Button(value='🙁 Disagree', variant='secondary', visible=False)
- feedback_uncertain = gr.Button(value='🤔 Uncertain', variant='secondary', visible=False)
- feedback_ack = gr.Markdown(value='', visible=True, interactive=False)
-
-
- type_btn.click(identify_type, inputs=[input_question, engine], outputs=[datatype])
- const_btn.click(self_construction, inputs=[datatype], outputs=[sub_title, plot, md, demo_text, label, ans_ours, ans_zeroshot, feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- retr_btn.click(self_retrieval, inputs=[input_question, engine], outputs=[sub_title, plot, md, entities, self_know, kb_know, label, ans_ours, ans_zeroshot, feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- refine_btn.click(self_refinement, inputs=[input_question, entities, self_know, kb_know, engine], outputs=[sub_title, plot, md, refine_know, label, ans_ours, ans_zeroshot, feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- revis_btn.click(self_revision, inputs=[input_question, datatype, demo_text, refine_know, engine], outputs=[sub_title, plot, md, cor_ans, label, ans_ours, ans_zeroshot, feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- consis_btn.click(self_consistency, inputs=[cor_ans, datatype, input_question, engine], outputs=[sub_title, plot, md, label, ans_ours, ans_zeroshot, feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- reset_btn.click(reset, inputs=[], outputs=[input_question, datatype, sub_title, plot, md, label, ans_ours, ans_zeroshot, feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
-
- feedback_agree.click(record_feedback_agree, inputs=[input_question, datatype, ans_ours, ans_zeroshot, self_know, kb_know, refine_know, cor_ans ,store_flag], outputs=[feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- feedback_disagree.click(record_feedback_disagree, inputs=[input_question, datatype, ans_ours, ans_zeroshot, self_know, kb_know, refine_know, cor_ans ,store_flag], outputs=[feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
- feedback_uncertain.click(record_feedback_uncertain, inputs=[input_question, datatype, ans_ours, ans_zeroshot, self_know, kb_know, refine_know, cor_ans ,store_flag], outputs=[feedback_agree, feedback_disagree, feedback_uncertain, feedback_ack])
-
-
-demo.launch()
-
-
-
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/gradio_canny2image.py b/spaces/Anonymous-sub/Rerender/ControlNet/gradio_canny2image.py
deleted file mode 100644
index 9866cac5b35925576c20ef4b9ee8b1b1cca235b2..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/gradio_canny2image.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from share import *
-import config
-
-import cv2
-import einops
-import gradio as gr
-import numpy as np
-import torch
-import random
-
-from pytorch_lightning import seed_everything
-from annotator.util import resize_image, HWC3
-from annotator.canny import CannyDetector
-from cldm.model import create_model, load_state_dict
-from cldm.ddim_hacked import DDIMSampler
-
-
-apply_canny = CannyDetector()
-
-model = create_model('./models/cldm_v15.yaml').cpu()
-model.load_state_dict(load_state_dict('./models/control_sd15_canny.pth', location='cuda'))
-model = model.cuda()
-ddim_sampler = DDIMSampler(model)
-
-
-def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, low_threshold, high_threshold):
- with torch.no_grad():
- img = resize_image(HWC3(input_image), image_resolution)
- H, W, C = img.shape
-
- detected_map = apply_canny(img, low_threshold, high_threshold)
- detected_map = HWC3(detected_map)
-
- control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
- control = torch.stack([control for _ in range(num_samples)], dim=0)
- control = einops.rearrange(control, 'b h w c -> b c h w').clone()
-
- if seed == -1:
- seed = random.randint(0, 65535)
- seed_everything(seed)
-
- if config.save_memory:
- model.low_vram_shift(is_diffusing=False)
-
- cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
- un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
- shape = (4, H // 8, W // 8)
-
- if config.save_memory:
- model.low_vram_shift(is_diffusing=True)
-
- model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13) # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01
- samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
- shape, cond, verbose=False, eta=eta,
- unconditional_guidance_scale=scale,
- unconditional_conditioning=un_cond)
-
- if config.save_memory:
- model.low_vram_shift(is_diffusing=False)
-
- x_samples = model.decode_first_stage(samples)
- x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
-
- results = [x_samples[i] for i in range(num_samples)]
- return [255 - detected_map] + results
-
-
-block = gr.Blocks().queue()
-with block:
- with gr.Row():
- gr.Markdown("## Control Stable Diffusion with Canny Edge Maps")
- with gr.Row():
- with gr.Column():
- input_image = gr.Image(source='upload', type="numpy")
- prompt = gr.Textbox(label="Prompt")
- run_button = gr.Button(label="Run")
- with gr.Accordion("Advanced options", open=False):
- num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
- image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
- strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
- guess_mode = gr.Checkbox(label='Guess Mode', value=False)
- low_threshold = gr.Slider(label="Canny low threshold", minimum=1, maximum=255, value=100, step=1)
- high_threshold = gr.Slider(label="Canny high threshold", minimum=1, maximum=255, value=200, step=1)
- ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
- scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
- seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
- eta = gr.Number(label="eta (DDIM)", value=0.0)
- a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
- n_prompt = gr.Textbox(label="Negative Prompt",
- value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
- with gr.Column():
- result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
- ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, low_threshold, high_threshold]
- run_button.click(fn=process, inputs=ips, outputs=[result_gallery])
-
-
-block.launch(server_name='0.0.0.0')
diff --git a/spaces/Anthony7906/MengHuiMXD_GPT/README.md b/spaces/Anthony7906/MengHuiMXD_GPT/README.md
deleted file mode 100644
index 7128e29689e35d059c9cc0a5050910fbd34873cd..0000000000000000000000000000000000000000
--- a/spaces/Anthony7906/MengHuiMXD_GPT/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChuanhuChatGPT
-emoji: 🐯
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.25.0
-app_file: ChuanhuChatbot.py
-pinned: false
-license: gpl-3.0
-duplicated_from: JohnSmith9982/ChuanhuChatGPT
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Apex-X/ROOPOK/roop/globals.py b/spaces/Apex-X/ROOPOK/roop/globals.py
deleted file mode 100644
index 3eca8d0d024db967cc6d7e7149f68f65f84d7072..0000000000000000000000000000000000000000
--- a/spaces/Apex-X/ROOPOK/roop/globals.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from typing import List, Optional
-
-source_path: Optional[str] = None
-target_path: Optional[str] = None
-output_path: Optional[str] = None
-headless: Optional[bool] = None
-frame_processors: List[str] = []
-keep_fps: Optional[bool] = None
-keep_frames: Optional[bool] = None
-skip_audio: Optional[bool] = None
-many_faces: Optional[bool] = None
-reference_face_position: Optional[int] = None
-reference_frame_number: Optional[int] = None
-similar_face_distance: Optional[float] = None
-temp_frame_format: Optional[str] = None
-temp_frame_quality: Optional[int] = None
-output_video_encoder: Optional[str] = None
-output_video_quality: Optional[int] = None
-max_memory: Optional[int] = None
-execution_providers: List[str] = []
-execution_threads: Optional[int] = None
-log_level: str = 'error'
diff --git a/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/registry.py b/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/registry.py
deleted file mode 100644
index 2d22a59eec79a2a19b83fa1779f2adaf5753aec6..0000000000000000000000000000000000000000
--- a/spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/registry.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# ------------------------------------------------------------------------
-# Grounding DINO
-# url: https://github.com/IDEA-Research/GroundingDINO
-# Copyright (c) 2023 IDEA. All Rights Reserved.
-# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-# ------------------------------------------------------------------------
-# -*- coding: utf-8 -*-
-# @Author: Yihao Chen
-# @Date: 2021-08-16 16:03:17
-# @Last Modified by: Shilong Liu
-# @Last Modified time: 2022-01-23 15:26
-# modified from mmcv
-
-import inspect
-from functools import partial
-
-
-class Registry(object):
- def __init__(self, name):
- self._name = name
- self._module_dict = dict()
-
- def __repr__(self):
- format_str = self.__class__.__name__ + "(name={}, items={})".format(
- self._name, list(self._module_dict.keys())
- )
- return format_str
-
- def __len__(self):
- return len(self._module_dict)
-
- @property
- def name(self):
- return self._name
-
- @property
- def module_dict(self):
- return self._module_dict
-
- def get(self, key):
- return self._module_dict.get(key, None)
-
- def registe_with_name(self, module_name=None, force=False):
- return partial(self.register, module_name=module_name, force=force)
-
- def register(self, module_build_function, module_name=None, force=False):
- """Register a module build function.
- Args:
-            module_build_function (callable): the module build function to be registered.
- """
- if not inspect.isfunction(module_build_function):
- raise TypeError(
- "module_build_function must be a function, but got {}".format(
- type(module_build_function)
- )
- )
- if module_name is None:
- module_name = module_build_function.__name__
- if not force and module_name in self._module_dict:
- raise KeyError("{} is already registered in {}".format(module_name, self.name))
- self._module_dict[module_name] = module_build_function
-
- return module_build_function
-
-
-MODULE_BUILD_FUNCS = Registry("model build functions")
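-
-
-if __name__ == "__main__":
-    # Hypothetical usage sketch (not part of the upstream file): register a
-    # build function under an explicit name on the registry above, then look
-    # it up. Note the upstream spelling "registe_with_name".
-    @MODULE_BUILD_FUNCS.registe_with_name(module_name="toy_model")
-    def build_toy_model(args):
-        # Stand-in for a real model constructor.
-        return {"model": "toy_model", "args": args}
-
-    print(MODULE_BUILD_FUNCS)  # Registry(name=model build functions, items=['toy_model'])
-    print(MODULE_BUILD_FUNCS.get("toy_model")(args=None))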
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/dist_info.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/dist_info.py
deleted file mode 100644
index 0685c94596f2e74642ecf57b33b6c20f937d03c0..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/command/dist_info.py
+++ /dev/null
@@ -1,142 +0,0 @@
-"""
-Create a dist_info directory
-As defined in the wheel specification
-"""
-
-import os
-import re
-import shutil
-import sys
-import warnings
-from contextlib import contextmanager
-from inspect import cleandoc
-from pathlib import Path
-
-from distutils.core import Command
-from distutils import log
-from setuptools.extern import packaging
-from setuptools._deprecation_warning import SetuptoolsDeprecationWarning
-
-
-class dist_info(Command):
-
- description = 'create a .dist-info directory'
-
- user_options = [
- ('egg-base=', 'e', "directory containing .egg-info directories"
- " (default: top of the source tree)"
- " DEPRECATED: use --output-dir."),
-        ('output-dir=', 'o', "directory inside of which the .dist-info will be "
-                             "created (default: top of the source tree)"),
- ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
- ('tag-build=', 'b', "Specify explicit tag to add to version number"),
- ('no-date', 'D', "Don't include date stamp [default]"),
- ('keep-egg-info', None, "*TRANSITIONAL* will be removed in the future"),
- ]
-
- boolean_options = ['tag-date', 'keep-egg-info']
- negative_opt = {'no-date': 'tag-date'}
-
- def initialize_options(self):
- self.egg_base = None
- self.output_dir = None
- self.name = None
- self.dist_info_dir = None
- self.tag_date = None
- self.tag_build = None
- self.keep_egg_info = False
-
- def finalize_options(self):
- if self.egg_base:
- msg = "--egg-base is deprecated for dist_info command. Use --output-dir."
- warnings.warn(msg, SetuptoolsDeprecationWarning)
- self.output_dir = self.egg_base or self.output_dir
-
- dist = self.distribution
- project_dir = dist.src_root or os.curdir
- self.output_dir = Path(self.output_dir or project_dir)
-
- egg_info = self.reinitialize_command("egg_info")
- egg_info.egg_base = str(self.output_dir)
-
- if self.tag_date:
- egg_info.tag_date = self.tag_date
- else:
- self.tag_date = egg_info.tag_date
-
- if self.tag_build:
- egg_info.tag_build = self.tag_build
- else:
- self.tag_build = egg_info.tag_build
-
- egg_info.finalize_options()
- self.egg_info = egg_info
-
- name = _safe(dist.get_name())
- version = _version(dist.get_version())
- self.name = f"{name}-{version}"
- self.dist_info_dir = os.path.join(self.output_dir, f"{self.name}.dist-info")
-
- @contextmanager
- def _maybe_bkp_dir(self, dir_path: str, requires_bkp: bool):
- if requires_bkp:
- bkp_name = f"{dir_path}.__bkp__"
- _rm(bkp_name, ignore_errors=True)
- _copy(dir_path, bkp_name, dirs_exist_ok=True, symlinks=True)
- try:
- yield
- finally:
- _rm(dir_path, ignore_errors=True)
- shutil.move(bkp_name, dir_path)
- else:
- yield
-
- def run(self):
- self.output_dir.mkdir(parents=True, exist_ok=True)
- self.egg_info.run()
- egg_info_dir = self.egg_info.egg_info
- assert os.path.isdir(egg_info_dir), ".egg-info dir should have been created"
-
- log.info("creating '{}'".format(os.path.abspath(self.dist_info_dir)))
- bdist_wheel = self.get_finalized_command('bdist_wheel')
-
-        # TODO: if bdist_wheel is merged into setuptools, just add "keep_egg_info" there
- with self._maybe_bkp_dir(egg_info_dir, self.keep_egg_info):
- bdist_wheel.egg2dist(egg_info_dir, self.dist_info_dir)
-
-
-def _safe(component: str) -> str:
- """Escape a component used to form a wheel name according to PEP 491"""
- return re.sub(r"[^\w\d.]+", "_", component)
-
-
-def _version(version: str) -> str:
- """Convert an arbitrary string to a version string."""
- v = version.replace(' ', '.')
- try:
- return str(packaging.version.Version(v)).replace("-", "_")
- except packaging.version.InvalidVersion:
- msg = f"""Invalid version: {version!r}.
- !!\n\n
- ###################
- # Invalid version #
- ###################
- {version!r} is not valid according to PEP 440.\n
-        Please make sure to specify a valid version for your package.
- Also note that future releases of setuptools may halt the build process
- if an invalid version is given.
- \n\n!!
- """
- warnings.warn(cleandoc(msg))
- return _safe(v).strip("_")
-
-
-def _rm(dir_name, **opts):
- if os.path.isdir(dir_name):
- shutil.rmtree(dir_name, **opts)
-
-
-def _copy(src, dst, **opts):
- if sys.version_info < (3, 8):
- opts.pop("dirs_exist_ok", None)
- shutil.copytree(src, dst, **opts)
diff --git a/spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/loss.py b/spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/loss.py
deleted file mode 100644
index cc66298a14997da4aa2efc71e37c0a6bcda53fd1..0000000000000000000000000000000000000000
--- a/spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/loss.py
+++ /dev/null
@@ -1,398 +0,0 @@
-from multiprocessing.sharedctypes import Value
-import torch
-import torch.distributed.nn
-from torch import distributed as dist, nn as nn
-from torch.nn import functional as F
-import numpy as np
-from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
-
-try:
- import horovod.torch as hvd
-except ImportError:
- hvd = None
-
-
-def gather_features(
- audio_features,
- text_features,
- audio_features_mlp=None,
- text_features_mlp=None,
- local_loss=False,
- gather_with_grad=False,
- rank=0,
- world_size=1,
- use_horovod=False,
- mlp_loss=False,
-):
- if use_horovod:
- assert hvd is not None, "Please install horovod"
- if gather_with_grad:
- all_audio_features = hvd.allgather(audio_features)
- all_text_features = hvd.allgather(text_features)
- if mlp_loss:
- all_audio_features_mlp = hvd.allgather(audio_features_mlp)
- all_text_features_mlp = hvd.allgather(text_features_mlp)
- else:
- with torch.no_grad():
- all_audio_features = hvd.allgather(audio_features)
- all_text_features = hvd.allgather(text_features)
- if mlp_loss:
- all_audio_features_mlp = hvd.allgather(audio_features_mlp)
- all_text_features_mlp = hvd.allgather(text_features_mlp)
- if not local_loss:
- # ensure grads for local rank when all_* features don't have a gradient
- gathered_audio_features = list(
- all_audio_features.chunk(world_size, dim=0)
- )
- gathered_text_features = list(
- all_text_features.chunk(world_size, dim=0)
- )
- gathered_audio_features[rank] = audio_features
- gathered_text_features[rank] = text_features
- all_audio_features = torch.cat(gathered_audio_features, dim=0)
- all_text_features = torch.cat(gathered_text_features, dim=0)
- if mlp_loss:
- gathered_audio_features_mlp = list(
- all_audio_features_mlp.chunk(world_size, dim=0)
- )
- gathered_text_features_mlp = list(
- all_text_features_mlp.chunk(world_size, dim=0)
- )
- gathered_audio_features_mlp[rank] = audio_features_mlp
- gathered_text_features_mlp[rank] = text_features_mlp
- all_audio_features_mlp = torch.cat(
- gathered_audio_features_mlp, dim=0
- )
- all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
- else:
- # We gather tensors from all gpus
- if gather_with_grad:
- all_audio_features = torch.cat(
- torch.distributed.nn.all_gather(audio_features), dim=0
- )
- all_text_features = torch.cat(
- torch.distributed.nn.all_gather(text_features), dim=0
- )
- if mlp_loss:
- all_audio_features_mlp = torch.cat(
- torch.distributed.nn.all_gather(audio_features_mlp), dim=0
- )
- all_text_features_mlp = torch.cat(
- torch.distributed.nn.all_gather(text_features_mlp), dim=0
- )
- else:
- gathered_audio_features = [
- torch.zeros_like(audio_features) for _ in range(world_size)
- ]
- gathered_text_features = [
- torch.zeros_like(text_features) for _ in range(world_size)
- ]
- dist.all_gather(gathered_audio_features, audio_features)
- dist.all_gather(gathered_text_features, text_features)
- if mlp_loss:
- gathered_audio_features_mlp = [
- torch.zeros_like(audio_features_mlp) for _ in range(world_size)
- ]
- gathered_text_features_mlp = [
- torch.zeros_like(text_features_mlp) for _ in range(world_size)
- ]
- dist.all_gather(gathered_audio_features_mlp, audio_features_mlp)
- dist.all_gather(gathered_text_features_mlp, text_features_mlp)
- if not local_loss:
- # ensure grads for local rank when all_* features don't have a gradient
- gathered_audio_features[rank] = audio_features
- gathered_text_features[rank] = text_features
- if mlp_loss:
- gathered_audio_features_mlp[rank] = audio_features_mlp
- gathered_text_features_mlp[rank] = text_features_mlp
-
- all_audio_features = torch.cat(gathered_audio_features, dim=0)
- all_text_features = torch.cat(gathered_text_features, dim=0)
- if mlp_loss:
- all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
- all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
- if mlp_loss:
- return (
- all_audio_features,
- all_text_features,
- all_audio_features_mlp,
- all_text_features_mlp,
- )
- else:
- return all_audio_features, all_text_features
-
-
-class ClipLoss(nn.Module):
- def __init__(
- self,
- local_loss=False,
- gather_with_grad=False,
- cache_labels=False,
- rank=0,
- world_size=1,
- use_horovod=False,
- mlp_loss=False,
- weight_loss_kappa=0,
- ):
- super().__init__()
- self.local_loss = local_loss
- self.gather_with_grad = gather_with_grad
- self.cache_labels = cache_labels
- self.rank = rank
- self.world_size = world_size
- self.use_horovod = use_horovod
- self.mlp_loss = mlp_loss
- self.weighted_loss = bool(weight_loss_kappa != 0)
- self.weight_loss_kappa = weight_loss_kappa
- # cache state
- self.prev_num_logits = 0
- self.labels = {}
-
- def forward(
- self,
- audio_features,
- text_features,
- logit_scale_a,
- logit_scale_t=None,
- audio_features_mlp=None,
- text_features_mlp=None,
- ):
- device = audio_features.device
- if self.mlp_loss:
- if self.world_size > 1:
- (
- all_audio_features,
- all_text_features,
- all_audio_features_mlp,
- all_text_features_mlp,
- ) = gather_features(
- audio_features=audio_features,
- text_features=text_features,
- audio_features_mlp=audio_features_mlp,
- text_features_mlp=text_features_mlp,
- local_loss=self.local_loss,
- gather_with_grad=self.gather_with_grad,
- rank=self.rank,
- world_size=self.world_size,
- use_horovod=self.use_horovod,
- mlp_loss=self.mlp_loss,
- )
- if self.local_loss:
- a_logits_per_audio = (
- logit_scale_a * audio_features @ all_text_features_mlp.T
- )
- a_logits_per_text = (
- logit_scale_a * text_features_mlp @ all_audio_features.T
- )
- t_logits_per_audio = (
- logit_scale_t * audio_features_mlp @ all_text_features.T
- )
- t_logits_per_text = (
- logit_scale_t * text_features @ all_audio_features_mlp.T
- )
- else:
- a_logits_per_audio = (
- logit_scale_a * all_audio_features @ all_text_features_mlp.T
- )
- a_logits_per_text = a_logits_per_audio.T
- t_logits_per_audio = (
- logit_scale_t * all_audio_features_mlp @ all_text_features.T
- )
- t_logits_per_text = t_logits_per_audio.T
- else:
- a_logits_per_audio = (
- logit_scale_a * audio_features @ text_features_mlp.T
- )
- a_logits_per_text = logit_scale_a * text_features_mlp @ audio_features.T
- t_logits_per_audio = (
- logit_scale_t * audio_features_mlp @ text_features.T
- )
- t_logits_per_text = logit_scale_t * text_features @ audio_features_mlp.T
-
- # calculated ground-truth and cache if enabled
- num_logits = a_logits_per_audio.shape[0]
- if self.prev_num_logits != num_logits or device not in self.labels:
- labels = torch.arange(num_logits, device=device, dtype=torch.long)
- if self.world_size > 1 and self.local_loss:
- labels = labels + num_logits * self.rank
- if self.cache_labels:
- self.labels[device] = labels
- self.prev_num_logits = num_logits
- else:
- labels = self.labels[device]
-
- if not self.weighted_loss:
- total_loss = (
- F.cross_entropy(a_logits_per_audio, labels)
- + F.cross_entropy(a_logits_per_text, labels)
- + F.cross_entropy(t_logits_per_audio, labels)
- + F.cross_entropy(t_logits_per_text, labels)
- ) / 4
- else:
- audio_weight = (audio_features @ audio_features.T).detach()
- audio_weight = (
- torch.exp(
- torch.sum(audio_weight, axis=1)
- / (self.weight_loss_kappa * len(audio_weight))
- )
- ).detach()
- text_weight = (text_features @ text_features.T).detach()
- text_weight = (
- torch.exp(
- torch.sum(text_weight, axis=1)
- / (self.weight_loss_kappa * len(text_features))
- )
- ).detach()
- total_loss = (
- F.cross_entropy(a_logits_per_audio, labels, weight=audio_weight)
- + F.cross_entropy(a_logits_per_text, labels, weight=audio_weight)
- + F.cross_entropy(t_logits_per_audio, labels, weight=text_weight)
- + F.cross_entropy(t_logits_per_text, labels, weight=text_weight)
- ) / 4
- else:
- if self.world_size > 1:
- all_audio_features, all_text_features = gather_features(
- audio_features=audio_features,
- text_features=text_features,
- local_loss=self.local_loss,
- gather_with_grad=self.gather_with_grad,
- rank=self.rank,
- world_size=self.world_size,
- use_horovod=self.use_horovod,
- mlp_loss=self.mlp_loss,
- )
-
- if self.local_loss:
- logits_per_audio = (
- logit_scale_a * audio_features @ all_text_features.T
- )
- logits_per_text = (
- logit_scale_a * text_features @ all_audio_features.T
- )
- else:
- logits_per_audio = (
- logit_scale_a * all_audio_features @ all_text_features.T
- )
- logits_per_text = logits_per_audio.T
- else:
- logits_per_audio = logit_scale_a * audio_features @ text_features.T
- logits_per_text = logit_scale_a * text_features @ audio_features.T
-
- # calculated ground-truth and cache if enabled
- num_logits = logits_per_audio.shape[0]
- if self.prev_num_logits != num_logits or device not in self.labels:
- labels = torch.arange(num_logits, device=device, dtype=torch.long)
- if self.world_size > 1 and self.local_loss:
- labels = labels + num_logits * self.rank
- if self.cache_labels:
- self.labels[device] = labels
- self.prev_num_logits = num_logits
- else:
- labels = self.labels[device]
- if not self.weighted_loss:
- total_loss = (
- F.cross_entropy(logits_per_audio, labels)
- + F.cross_entropy(logits_per_text, labels)
- ) / 2
- else:
- audio_weight = (all_audio_features @ all_audio_features.T).detach()
- audio_weight = (
- torch.exp(
- torch.sum(audio_weight, axis=1)
- / (self.weight_loss_kappa * len(all_audio_features))
- )
- ).detach()
- text_weight = (all_text_features @ all_text_features.T).detach()
- text_weight = (
- torch.exp(
- torch.sum(text_weight, axis=1)
- / (self.weight_loss_kappa * len(all_text_features))
- )
- ).detach()
- total_loss = (
- F.cross_entropy(logits_per_audio, labels, weight=text_weight)
- + F.cross_entropy(logits_per_text, labels, weight=audio_weight)
- ) / 2
- return total_loss
-
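-if __name__ == "__main__":
-    # Minimal single-process usage sketch (not part of the upstream file):
-    # batch size, embedding dim, and the logit-scale value are illustrative.
-    loss_fn = ClipLoss()  # defaults: world_size=1, no MLP branch, unweighted loss
-    audio_feats = F.normalize(torch.randn(8, 512), dim=-1)
-    text_feats = F.normalize(torch.randn(8, 512), dim=-1)
-    logit_scale_a = torch.tensor(100.0)  # roughly exp(learned scale) after training
-    print("contrastive loss:", loss_fn(audio_feats, text_feats, logit_scale_a).item())
-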
-
-def lp_gather_features(pred, target, world_size=1, use_horovod=False):
- if use_horovod:
- assert hvd is not None, "Please install horovod"
- with torch.no_grad():
- all_preds = hvd.allgather(pred)
-            all_targets = hvd.allgather(target)
- else:
- gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
- gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]
-
- dist.all_gather(gathered_preds, pred)
- dist.all_gather(gathered_targets, target)
- all_preds = torch.cat(gathered_preds, dim=0)
- all_targets = torch.cat(gathered_targets, dim=0)
-
- return all_preds, all_targets
-
-
-def get_map(pred, target):
- pred = torch.sigmoid(pred).numpy()
- target = target.numpy()
- return np.mean(average_precision_score(target, pred, average=None))
-
-
-def get_acc(pred, target):
- pred = torch.argmax(pred, 1).numpy()
- target = torch.argmax(target, 1).numpy()
- return accuracy_score(target, pred)
-
-
-def get_mauc(pred, target):
- pred = torch.sigmoid(pred).numpy()
- target = target.numpy()
- return np.mean(roc_auc_score(target, pred, average=None))
-
-
-class LPMetrics(object):
- def __init__(self, metric_names=["map", "acc", "mauc"]):
- self.metrics = []
- for name in metric_names:
- self.metrics.append(self.get_metric(name))
- self.metric_names = metric_names
-
- def get_metric(self, name):
- if name == "map":
- return get_map
- elif name == "acc":
- return get_acc
- elif name == "mauc":
- return get_mauc
- else:
- raise ValueError(f"the metric should be at least one of [map, acc, mauc]")
-
- def evaluate_mertics(self, pred, target):
- metric_dict = {}
- for i in range(len(self.metric_names)):
- metric_dict[self.metric_names[i]] = self.metrics[i](pred, target)
- return metric_dict
-
-
-def calc_celoss(pred, target):
- target = torch.argmax(target, 1).long()
- return nn.CrossEntropyLoss()(pred, target)
-
-
-class LPLoss(nn.Module):
- def __init__(self, loss_name):
- super().__init__()
- if loss_name == "bce":
- self.loss_func = nn.BCEWithLogitsLoss()
- elif loss_name == "ce":
- self.loss_func = calc_celoss
- elif loss_name == "mse":
- self.loss_func = nn.MSELoss()
- else:
- raise ValueError(f"the loss func should be at least one of [bce, ce, mse]")
-
- def forward(self, pred, target):
- loss = self.loss_func(pred, target)
- return loss
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py
deleted file mode 100644
index 355e66a1d213cb599a7ffe55089d854089c8ead2..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/coco_schedule.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from fvcore.common.param_scheduler import MultiStepParamScheduler
-
-from detectron2.config import LazyCall as L
-from detectron2.solver import WarmupParamScheduler
-
-
-def default_X_scheduler(num_X):
- """
- Returns the config for a default multi-step LR scheduler such as "1x", "3x",
- commonly referred to in papers, where every 1x has the total length of 1440k
- training images (~12 COCO epochs). LR is decayed twice at the end of training
- following the strategy defined in "Rethinking ImageNet Pretraining", Sec 4.
-
- Args:
- num_X: a positive real number
-
- Returns:
- DictConfig: configs that define the multiplier for LR during training
- """
- # total number of iterations assuming 16 batch size, using 1440000/16=90000
- total_steps_16bs = num_X * 90000
-
- if num_X <= 2:
- scheduler = L(MultiStepParamScheduler)(
- values=[1.0, 0.1, 0.01],
- # note that scheduler is scale-invariant. This is equivalent to
- # milestones=[6, 8, 9]
- milestones=[60000, 80000, 90000],
- )
- else:
- scheduler = L(MultiStepParamScheduler)(
- values=[1.0, 0.1, 0.01],
- milestones=[total_steps_16bs - 60000, total_steps_16bs - 20000, total_steps_16bs],
- )
- return L(WarmupParamScheduler)(
- scheduler=scheduler,
- warmup_length=1000 / total_steps_16bs,
- warmup_method="linear",
- warmup_factor=0.001,
- )
-
-
-lr_multiplier_1x = default_X_scheduler(1)
-lr_multiplier_2x = default_X_scheduler(2)
-lr_multiplier_3x = default_X_scheduler(3)
-lr_multiplier_6x = default_X_scheduler(6)
-lr_multiplier_9x = default_X_scheduler(9)
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/conf.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/conf.py
deleted file mode 100644
index c7232f41f7540d6d0c2e36827c9bd18d36fffac7..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/conf.py
+++ /dev/null
@@ -1,382 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-# flake8: noqa
-
-# Configuration file for the Sphinx documentation builder.
-#
-# This file does only contain a selection of the most common options. For a
-# full list see the documentation:
-# http://www.sphinx-doc.org/en/master/config
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-import os
-import sys
-from unittest import mock
-from sphinx.domains import Domain
-from typing import Dict, List, Tuple
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-#
-import sphinx_rtd_theme
-
-
-class GithubURLDomain(Domain):
- """
- Resolve certain links in markdown files to github source.
- """
-
- name = "githuburl"
- ROOT = "https://github.com/facebookresearch/detectron2/blob/main/"
- LINKED_DOC = ["tutorials/install", "tutorials/getting_started"]
-
- def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- github_url = None
- if not target.endswith("html") and target.startswith("../../"):
- url = target.replace("../", "")
- github_url = url
- if fromdocname in self.LINKED_DOC:
- # unresolved links in these docs are all github links
- github_url = target
-
- if github_url is not None:
- if github_url.endswith("MODEL_ZOO") or github_url.endswith("README"):
- # bug of recommonmark.
- # https://github.com/readthedocs/recommonmark/blob/ddd56e7717e9745f11300059e4268e204138a6b1/recommonmark/parser.py#L152-L155
- github_url += ".md"
- print("Ref {} resolved to github:{}".format(target, github_url))
- contnode["refuri"] = self.ROOT + github_url
- return [("githuburl:any", contnode)]
- else:
- return []
-
-
-# to support markdown
-from recommonmark.parser import CommonMarkParser
-
-sys.path.insert(0, os.path.abspath("../"))
-os.environ["_DOC_BUILDING"] = "True"
-DEPLOY = os.environ.get("READTHEDOCS") == "True"
-
-
-# -- Project information -----------------------------------------------------
-
-# fmt: off
-try:
- import torch # noqa
-except ImportError:
- for m in [
- "torch", "torchvision", "torch.nn", "torch.nn.parallel", "torch.distributed", "torch.multiprocessing", "torch.autograd",
- "torch.autograd.function", "torch.nn.modules", "torch.nn.modules.utils", "torch.utils", "torch.utils.data", "torch.onnx",
- "torchvision", "torchvision.ops",
- ]:
- sys.modules[m] = mock.Mock(name=m)
- sys.modules['torch'].__version__ = "1.7" # fake version
- HAS_TORCH = False
-else:
- try:
- torch.ops.detectron2 = mock.Mock(name="torch.ops.detectron2")
-    except Exception:
- pass
- HAS_TORCH = True
-
-for m in [
- "cv2", "scipy", "portalocker", "detectron2._C",
- "pycocotools", "pycocotools.mask", "pycocotools.coco", "pycocotools.cocoeval",
- "google", "google.protobuf", "google.protobuf.internal", "onnx",
- "caffe2", "caffe2.proto", "caffe2.python", "caffe2.python.utils", "caffe2.python.onnx", "caffe2.python.onnx.backend",
-]:
- sys.modules[m] = mock.Mock(name=m)
-# fmt: on
-sys.modules["cv2"].__version__ = "3.4"
-
-import detectron2 # isort: skip
-
-if HAS_TORCH:
- from detectron2.utils.env import fixup_module_metadata
-
- fixup_module_metadata("torch.nn", torch.nn.__dict__)
- fixup_module_metadata("torch.utils.data", torch.utils.data.__dict__)
-
-
-project = "detectron2"
-copyright = "2019-2020, detectron2 contributors"
-author = "detectron2 contributors"
-
-# The short X.Y version
-version = detectron2.__version__
-# The full version, including alpha/beta/rc tags
-release = version
-
-
-# -- General configuration ---------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-needs_sphinx = "3.0"
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
- "recommonmark",
- "sphinx.ext.autodoc",
- "sphinx.ext.napoleon",
- "sphinx.ext.intersphinx",
- "sphinx.ext.todo",
- "sphinx.ext.coverage",
- "sphinx.ext.mathjax",
- "sphinx.ext.viewcode",
- "sphinx.ext.githubpages",
-]
-
-# -- Configurations for plugins ------------
-napoleon_google_docstring = True
-napoleon_include_init_with_doc = True
-napoleon_include_special_with_doc = True
-napoleon_numpy_docstring = False
-napoleon_use_rtype = False
-autodoc_inherit_docstrings = False
-autodoc_member_order = "bysource"
-
-if DEPLOY:
- intersphinx_timeout = 10
-else:
- # skip this when building locally
- intersphinx_timeout = 0.5
-intersphinx_mapping = {
- "python": ("https://docs.python.org/3.6", None),
- "numpy": ("https://docs.scipy.org/doc/numpy/", None),
- "torch": ("https://pytorch.org/docs/master/", None),
-}
-# -------------------------
-
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
-
-source_suffix = [".rst", ".md"]
-
-# The master toctree document.
-master_doc = "index"
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "build", "README.md", "tutorials/README.md"]
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = "sphinx"
-
-
-# -- Options for HTML output -------------------------------------------------
-
-html_theme = "sphinx_rtd_theme"
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#
-# html_theme_options = {}
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ["_static"]
-html_css_files = ["css/custom.css"]
-
-# Custom sidebar templates, must be a dictionary that maps document names
-# to template names.
-#
-# The default sidebars (for documents that don't match any pattern) are
-# defined by theme itself. Builtin themes are using these templates by
-# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
-# 'searchbox.html']``.
-#
-# html_sidebars = {}
-
-
-# -- Options for HTMLHelp output ---------------------------------------------
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = "detectron2doc"
-
-
-# -- Options for LaTeX output ------------------------------------------------
-
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #
- # 'papersize': 'letterpaper',
- # The font size ('10pt', '11pt' or '12pt').
- #
- # 'pointsize': '10pt',
- # Additional stuff for the LaTeX preamble.
- #
- # 'preamble': '',
- # Latex figure (float) alignment
- #
- # 'figure_align': 'htbp',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- (master_doc, "detectron2.tex", "detectron2 Documentation", "detectron2 contributors", "manual")
-]
-
-
-# -- Options for manual page output ------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "detectron2", "detectron2 Documentation", [author], 1)]
-
-
-# -- Options for Texinfo output ----------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- (
- master_doc,
- "detectron2",
- "detectron2 Documentation",
- author,
- "detectron2",
- "One line description of project.",
- "Miscellaneous",
- )
-]
-
-
-# -- Options for todo extension ----------------------------------------------
-
-# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = True
-
-
-def autodoc_skip_member(app, what, name, obj, skip, options):
- # we hide something deliberately
- if getattr(obj, "__HIDE_SPHINX_DOC__", False):
- return True
-
- # Hide some that are deprecated or not intended to be used
- HIDDEN = {
- "ResNetBlockBase",
- "GroupedBatchSampler",
- "build_transform_gen",
- "apply_transform_gens",
- "TransformGen",
- "apply_augmentations",
- "StandardAugInput",
- "build_batch_data_loader",
- "draw_panoptic_seg_predictions",
- "WarmupCosineLR",
- "WarmupMultiStepLR",
- "downgrade_config",
- "upgrade_config",
- "add_export_config",
- }
- try:
- if name in HIDDEN or (
- hasattr(obj, "__doc__") and obj.__doc__.lower().strip().startswith("deprecated")
- ):
- print("Skipping deprecated object: {}".format(name))
- return True
-    except Exception:
- pass
- return skip
-
-
-_PAPER_DATA = {
- "resnet": ("1512.03385", "Deep Residual Learning for Image Recognition"),
- "fpn": ("1612.03144", "Feature Pyramid Networks for Object Detection"),
- "mask r-cnn": ("1703.06870", "Mask R-CNN"),
- "faster r-cnn": (
- "1506.01497",
- "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks",
- ),
- "deformconv": ("1703.06211", "Deformable Convolutional Networks"),
- "deformconv2": ("1811.11168", "Deformable ConvNets v2: More Deformable, Better Results"),
- "panopticfpn": ("1901.02446", "Panoptic Feature Pyramid Networks"),
- "retinanet": ("1708.02002", "Focal Loss for Dense Object Detection"),
- "cascade r-cnn": ("1712.00726", "Cascade R-CNN: Delving into High Quality Object Detection"),
- "lvis": ("1908.03195", "LVIS: A Dataset for Large Vocabulary Instance Segmentation"),
- "rrpn": ("1703.01086", "Arbitrary-Oriented Scene Text Detection via Rotation Proposals"),
- "imagenet in 1h": ("1706.02677", "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour"),
- "xception": ("1610.02357", "Xception: Deep Learning with Depthwise Separable Convolutions"),
- "mobilenet": (
- "1704.04861",
- "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications",
- ),
- "deeplabv3+": (
- "1802.02611",
- "Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation",
- ),
- "dds": ("2003.13678", "Designing Network Design Spaces"),
- "scaling": ("2103.06877", "Fast and Accurate Model Scaling"),
- "fcos": ("2006.09214", "FCOS: A Simple and Strong Anchor-free Object Detector"),
- "rethinking-batchnorm": ("2105.07576", 'Rethinking "Batch" in BatchNorm'),
-}
-
-
-def paper_ref_role(
- typ: str,
- rawtext: str,
- text: str,
- lineno: int,
- inliner,
- options: Dict = {},
- content: List[str] = [],
-):
- """
- Parse :paper:`xxx`. Similar to the "extlinks" sphinx extension.
- """
- from docutils import nodes, utils
- from sphinx.util.nodes import split_explicit_title
-
- text = utils.unescape(text)
- has_explicit_title, title, link = split_explicit_title(text)
- link = link.lower()
- if link not in _PAPER_DATA:
- inliner.reporter.warning("Cannot find paper " + link)
- paper_url, paper_title = "#", link
- else:
- paper_url, paper_title = _PAPER_DATA[link]
- if "/" not in paper_url:
- paper_url = "https://arxiv.org/abs/" + paper_url
- if not has_explicit_title:
- title = paper_title
- pnode = nodes.reference(title, title, internal=False, refuri=paper_url)
- return [pnode], []
-
-
-def setup(app):
- from recommonmark.transform import AutoStructify
-
- app.add_domain(GithubURLDomain)
- app.connect("autodoc-skip-member", autodoc_skip_member)
- app.add_role("paper", paper_ref_role)
- app.add_config_value(
- "recommonmark_config",
- {"enable_math": True, "enable_inline_math": True, "enable_eval_rst": True},
- True,
- )
- app.add_transform(AutoStructify)
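A quick sketch of the lookup that paper_ref_role performs, assuming the trimmed-down _PAPER_DATA below; it shows how a :paper:`resnet` reference in the docs resolves to a full arXiv link:

_PAPER_DATA = {"resnet": ("1512.03385", "Deep Residual Learning for Image Recognition")}

def resolve(link):
    # mirror the hit path of paper_ref_role: bare arXiv ids are expanded into full URLs
    paper_url, paper_title = _PAPER_DATA[link.lower()]
    if "/" not in paper_url:
        paper_url = "https://arxiv.org/abs/" + paper_url
    return paper_title, paper_url

print(resolve("resnet"))
# ('Deep Residual Learning for Image Recognition', 'https://arxiv.org/abs/1512.03385')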
diff --git a/spaces/Benson/text-generation/Examples/Counter Strike Global Offensive Apk Download Pc.md b/spaces/Benson/text-generation/Examples/Counter Strike Global Offensive Apk Download Pc.md
deleted file mode 100644
index c3417361d057f91608a43836f1f7d1502279497b..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Counter Strike Global Offensive Apk Download Pc.md
+++ /dev/null
@@ -1,119 +0,0 @@
-
-
Counter-Strike: Global Offensive APK Download PC
-
If you are looking for a way to download and play one of the most popular shooter games in the world, you have come to the right place. In this article, we will show you how to download and install Counter-Strike: Global Offensive (CS:GO) on your PC for free. We will also give you an overview of the game and its features, as well as some tips and tricks to optimize it for your PC. So, without further ado, let's get started!
-
What is Counter-Strike: Global Offensive?
-
Counter-Strike: Global Offensive is an action-packed game based on the popular Counter-Strike franchise. It is a first-person shooter in which players who die stay dead until the next round. Players fight terrorists as part of a team and take part in rescue missions and assaults. Designed for Windows, CS:GO features new characters, maps, weapons, and game modes. Between rounds, players can spend the money they earn on weapons and gear. A round ends when all the players on one team are dead. The free-to-download game is hugely popular and competes with similar titles such as Call of Duty: Modern Warfare 3, Assassin's Creed, and PUBG Mobile.
There are many reasons why you should play Counter-Strike: Global Offensive on your PC. Here are some of them:
-
-
It is realistic. Unlike other shooter games, Counter-Strike: Global Offensive leaves little room for player modification, so everyone is stuck with the equipment and skill they possess. As a player, you must wait until the next round to come back to life if you die during a match. This makes the game more challenging and rewarding.
It is fun. You can complete varied objectives such as rescuing hostages, planting or defusing bombs, and killing enemies. You can also play different game modes, such as deathmatch, arms race, demolition, and more. You can also customize your weapons and skins to suit your style. The game is fast, exciting, and addictive.
-
-
-
The different game modes of Counter-Strike: Global Offensive
-
Counter-Strike: Global Offensive offers eight different game modes to choose from. Each game mode has its own rules, objectives, and maps. Here are the game modes you can play:
-
-
Competitive. This is the classic Counter-Strike: Global Offensive mode, where two teams of five players compete in a best-of-30 match. The first team to win 16 rounds wins the match. Players can choose from a set of official maps or community-made maps. Competitive mode has a ranking system that matches players of similar skill levels.
-
Casual. This is a more relaxed Counter-Strike: Global Offensive mode, where players can join or leave a match at any time. There are no penalties for leaving or team-killing, and friendly fire is disabled. Players can also buy any weapon they want at any time. Casual mode has two sub-modes: defuse and hostage.
-
Deathmatch. This is a respawn-based mode of Counter-Strike: Global Offensive, where players respawn instantly after dying and try to kill as many enemies as possible within a time limit. Players can choose any weapon they want at any time, and they get bonus points for kills with different weapons. Deathmatch mode has no teams or objectives.
-
Arms Race. This is a gun-progression mode of Counter-Strike: Global Offensive, where players start with a basic weapon and upgrade to a better one after each kill. The first player to get a kill with the golden knife wins the match. Arms Race mode has two teams and no objectives.
-
Demolition. This is a round-based gun-progression mode of Counter-Strike: Global Offensive, played on small maps with a single bomb site, where players earn a new weapon for each round in which they get a kill.
-
Danger Zone. This is a battle-royale mode of Counter-Strike: Global Offensive, where up to 18 players parachute onto a large map and fight to be the last one standing. Players must scavenge for weapons, ammo, money, and items while avoiding enemies and a shrinking safe zone. Danger Zone mode has solo, duo, and trio options.
-
Wingman. This is a 2v2 mode of Counter-Strike: Global Offensive, where two teams of two players compete in a best-of-16 match. The terrorists must plant the bomb at the single site, while the counter-terrorists must defuse it or eliminate all the terrorists. Wingman mode has a ranking system that matches players of similar skill levels.
-
Flying Scoutsman. This is a low-gravity mode of Counter-Strike: Global Offensive, where players only have SSG 08 sniper rifles and knives. Players can jump higher and move faster in this mode. Flying Scoutsman mode has no teams or objectives.
-
-
The weapons and customization of Counter-Strike: Global Offensive
-
Counter-Strike: Global Offensive features more than 40 different weapons you can use in the game. These weapons are divided into five categories: pistols, heavy weapons, submachine guns (SMGs), rifles, and grenades. Each weapon has its own stats, such as damage, accuracy, recoil, rate of fire, and magazine size. Some weapons are exclusive to the terrorists or the counter-terrorists, while others are shared by both sides.
-
You can also customize the look of your weapons with skins. Skins have different rarities, qualities, and patterns, which affect their value and appearance. Some skins are very rare and expensive, while others are common and cheap. You can also apply stickers, graffiti, or patches to your weapons or characters to personalize them further.
-
How to download Counter-Strike: Global Offensive APK for PC?
-
-
Download CS:GO via torrent
-
One way to download Counter-Strike: Global Offensive for your PC is to use a torrent client. A torrent client is software that lets you download files from other users who have the same files. This way, you can download large files faster and more efficiently. However, you should be careful when using torrents, as some of them may contain viruses or malware that can harm your PC. You should also use a VPN (virtual private network) to protect your privacy and avoid legal trouble.
-
The best torrent sites for CS:GO
-
There are many torrent sites that offer Counter-Strike: Global Offensive for download, but not all of them are reliable and safe. Some of them may have fake or outdated files, while others may have few seeders or many leechers, which means slow download speeds. To help you find the best torrent sites for CS:GO, we have compiled a list of some of the most trusted and popular ones:
-
-
-
The Pirate Bay. This is one of the oldest and most famous torrent sites in the world. It has a huge collection of torrents for various categories, including games, movies, music, software, and more. You can easily find Counter-Strike: Global Offensive by searching for it in the search bar or browsing the games section. The Pirate Bay also has a user rating system that shows you the quality and safety of each torrent.
-
RARBG. This is another well-known torrent site that offers high-quality torrents across many genres. It has a simple and user-friendly interface that makes it easy to browse and find the files you want. You can find Counter-Strike: Global Offensive by typing its name in the search box or filtering by category and date. RARBG also has a comments section that lets you read feedback from other users about each torrent.
-
-
LimeTorrents. This is a reliable torrent site with a clean and simple design. It has a decent collection of torrents for various niches, such as games, movies, music, software, anime, and more. You can find Counter-Strike: Global Offensive by entering its name in the search bar or sorting by category and date. LimeTorrents also has a health meter that indicates the status of each torrent.
-
Torrentz2. This is a meta-search engine that aggregates torrents from multiple sources. It does not host any files itself; instead, it redirects you to other torrent sites that have the files you are looking for. You can find Counter-Strike: Global Offensive by typing its name in the search box or browsing the games category. Torrentz2 also shows the number of trackers and peers for each torrent.
-
-
The installation process of CS:GO via torrent
-
Once you have chosen a torrent site and downloaded the Counter-Strike: Global Offensive file, you need to install it on your PC. The installation process may vary depending on the file format and source, but it generally follows these steps:
-
-
Extract the file. Most of the time, the file you download will be compressed in ZIP or RAR format. You need to extract it with software such as WinRAR or 7-Zip. To do this, right-click the file and select "Extract here" or "Extract to folder". This will create a new folder with the game files inside.
Run the installer. Look for a file named "setup.exe" or "install.exe". This is the file that will install the game on your PC. Double-click it and follow the on-screen instructions. You may need to choose a destination folder, accept the terms and conditions, and enter a serial key if required.
-
-
-
Download CS:GO via launcher
-
Another way to download Counter-Strike: Global Offensive for your PC is to use a launcher. A launcher is software that lets you download, install, and update the game automatically. You do not need to use a torrent client or extract any files manually. However, you may need to create an account and log in to the launcher to access the game. You also need a stable internet connection and enough disk space for the game.
-
The best launchers for CS:GO
-
There are several launchers you can use to download and play Counter-Strike: Global Offensive on your PC. Some of them are official, while others are unofficial or fan-made. Here are some of the best launchers for CS:GO:
-
-
Steam. This is the official and most popular launcher for Counter-Strike: Global Offensive. Steam is a digital distribution platform that offers thousands of games, including CS:GO, for download and purchase. Steam also provides online multiplayer, chat, achievements, cloud storage, and more features for its users. You can download Steam for free from its website and create an account to access its services.
-
CS:GO Launcher. This is an unofficial launcher for Counter-Strike: Global Offensive that lets you play the game without Steam. CS:GO Launcher downloads and installs the latest version of the game automatically and lets you choose between different servers and mods. You can also customize your settings, skins, and configurations with this launcher. You can download CS:GO Launcher for free from its website and create an account to access its features.
-
-
-
The installation process of CS:GO via launcher
-
The installation process for Counter-Strike: Global Offensive via launcher is similar for most launchers, but it may differ slightly depending on the launcher you choose. It generally follows these steps:
-
-
Download the launcher. Go to the website of the launcher you want to use and download its file. The file size may vary depending on the launcher, but it should not be too large. Save the file somewhere you can easily find it later.
-
Install the launcher. Run the file you downloaded and follow the on-screen instructions. You may need to choose a destination folder, accept the terms and conditions, and enter your account details if required.
-
Download the game. After installing the launcher, open it and look for Counter-Strike: Global Offensive in its library or store. Click on it and select "Download" or "Install". The launcher will download and install the game files automatically. The download time may vary depending on your internet speed and disk space.
-
Run the game. After the game has been downloaded and installed, you can run it by clicking its icon in the launcher or on your desktop. You may need to log in to your account or verify your email if required. You can also change the game settings in the options menu.
-
-
How to optimize Counter-Strike: Global Offensive for PC?
-
Now that you have downloaded and installed Counter-Strike: Global Offensive on your PC, you may want to optimize it for better performance and graphics. There are some tips and tricks you can use to improve your gaming experience and avoid errors or crashes. Here are some of them:
How to adjust the settings of Counter-Strike: Global Offensive?
-
-
The recommended settings for Counter-Strike: Global Offensive
-
The optimal settings for Counter-Strike: Global Offensive depend on your PC's specifications, such as its CPU, GPU, RAM, and monitor. As a general rule, you should aim for a balance between performance and quality: do not sacrifice too many graphics for speed, or vice versa. To help you find the best settings for your PC, we have put together a table of recommended settings for different PC tiers:

| PC Specification | Resolution | Framerate | Texture Quality | Shadow Quality | Effect Detail | Shader Detail | MSAA Mode | FXAA | Vertical Sync |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Low-end PC (Intel Core i3, 4 GB RAM, Intel HD Graphics) | 1024x768 or lower | 60 FPS or lower | Low or Very Low | Low or Very Low | Low or Very Low | Low or Very Low | None or 2x MSAA | Disabled | Disabled |
| Mid-range PC (Intel Core i5, 8 GB RAM, Nvidia GeForce GTX 1050) | 1280x960 or higher | 60 FPS or higher | Medium or High | Medium or High | Medium or High | Medium or High | 4x or 8x MSAA | Enabled or Disabled | Disabled |
| High-end PC (Intel Core i7, 16 GB RAM, Nvidia GeForce RTX 2060) | 1920x1080 or higher | 120 FPS or higher | High or Very High | High or Very High | High or Very High | High or Very High | 8x or 16x MSAA | Enabled or Disabled | Disabled |
The advanced settings for Counter-Strike: Global Offensive
-
Beyond the in-game video options, you can fine-tune the game with console commands and launch options:

| Console command / Launch option | Description |
| --- | --- |
| fps_max [value] / -fps_max [value] | Sets the maximum framerate the game can run at. Replace [value] with any number that suits your preference; for example, fps_max 60 caps the game at 60 FPS. This can help reduce screen tearing and stuttering. |
| cl_forcepreload [value] / -preload | Forces the game to load all assets before a match starts. Replace [value] with 0 (off) or 1 (on); for example, cl_forcepreload 1 enables it. This can help reduce loading times and lag spikes. |
| mat_queue_mode [value] / -high | Sets the game's threading mode. Replace [value] with -1 (auto), 0 (single-threaded), or 2 (multi-threaded); for example, mat_queue_mode 2 enables multi-threading. This can help improve CPU performance and stability. |
| snd_mixahead [value] / -snd_mixahead [value] | Sets the game's sound buffer size. Replace [value] with any number between 0.01 and 0.1; for example, snd_mixahead 0.05 sets the sound buffer to 0.05 seconds. This can help reduce sound latency and distortion. |

You can put console commands like these in an autoexec.cfg file in the game's cfg folder so they run automatically at startup (add +exec autoexec.cfg to your launch options if it does not run by itself).
How to update your PC's drivers and software?
Drivers are the programs that let your hardware communicate with your operating system, while software covers the programs that let your PC perform various tasks, such as the operating system itself, your browser, your antivirus, and more. Updating your drivers and software can improve your PC's performance and security, and fix any bugs or errors that may affect your gaming experience. Here are some of the best tools for updating your PC:
-
The best tools to update your PC
-
-
-
Windows Update. This is Microsoft's official tool for updating your Windows operating system and other Microsoft products. Windows Update can automatically check for and install the latest updates for your PC, or you can check for and install them manually. You can access Windows Update by clicking the Start button and typing "Windows Update" into the search box.
-
Driver Booster. This is a third-party tool from IObit that lets you update your drivers with one click. Driver Booster can scan your PC for outdated, missing, or faulty drivers and download and install the latest ones for you. You can also back up and restore your drivers with this tool. You can download Driver Booster for free from its website and run it on your PC.
-
Ninite. This is a third-party tool that makes updating your software easy. Ninite can install and update multiple software applications at once, without any extra steps or clicks. You can choose from a list of popular applications, such as browsers, antivirus programs, media players, and more, and Ninite will download and install them for you. You can download Ninite for free from its website and run it on your PC.
-
-
The benefits of updating your PC
-
Updating the drivers and software on your PC can have many benefits for your gaming experience and overall performance. Here are some of them:
-
-
It improves compatibility. Updating your drivers and software ensures that your PC is compatible with the latest games and applications, as well as with other hardware devices. This can prevent crashes, errors, or glitches caused by outdated or incompatible files.
-
-
It fixes bugs. Updating your drivers and software resolves bugs or issues that may exist in your current files. This can fix problems or errors that affect your gaming experience or performance, and it also improves the security and stability of your PC.
-
-
Conclusion
-
In conclusion, Counter-Strike: Global Offensive is an amazing game that you should definitely try on your PC. It offers realistic, fun, and social gameplay that will keep you entertained for hours. You can download and install it for free via torrent or launcher, depending on your preference. You can also optimize it for your PC by adjusting the settings and updating your drivers and software. By following these tips and tricks, you can get the most out of Counter-Strike: Global Offensive.
-
If you liked this article, please share it with your friends and leave a comment below. Also, don't forget to check out our other articles on gaming topics. Thanks for reading!
-
FAQs
-
Here are some of the most common questions people ask about Counter-Strike: Global Offensive:
-
Is Counter-Strike: Global Offensive free?
Yes, the game is free to download and play. You can optionally buy cosmetic items such as weapon skins, stickers, or music kits. You can also buy or sell these items on the Steam market or trade them with other players.
-
Is Counter-Strike: Global Offensive safe?
-
Yes, Counter-Strike: Global Offensive is safe to download and play on your PC, as long as you use a trusted source, such as Steam, CS:GO Launcher, or Warzone. However, you should be careful when using torrents, as some of them may contain viruses or malware that can harm your PC. You should also use a VPN to protect your privacy and avoid legal trouble when using torrents.
-
Is Counter-Strike: Global Offensive online?
-
-
How do you play Counter-Strike: Global Offensive with friends?
-
There are several ways to play Counter-Strike: Global Offensive with your friends. Here are some of them:
-
-
Join a friend's game. You can join a friend's game by clicking their name in your Steam friends list and selecting "Join Game". This automatically connects you to their server and team. You can also invite them to your game by clicking their name and selecting "Invite to Game".
-
Create a lobby. You can create a lobby by clicking the "Play" button in the main menu and selecting "Create Lobby". This lets you invite up to four friends to join your lobby and play together. You can choose the game mode, map, and server you want to play on.
-
Create a server. You can create a server by clicking the "Play" button in the main menu and selecting "Create Server". This lets you host your own server and customize it with various options, such as a password, bots, friendly fire, and more. You can invite up to 15 friends to join your server and play together.
-
-
How do you get better at Counter-Strike: Global Offensive?
-
Counter-Strike: Global Offensive is a game that requires skill, strategy, and teamwork to win. It is not easy to master, but you can improve with practice and dedication. Here are some tips that can help you get better at Counter-Strike: Global Offensive:
-
-
Practice. The best way to get better at anything is to practice it regularly. You can practice Counter-Strike: Global Offensive by playing offline with bots, online with other players, or in training mode. You can also watch tutorials, guides, or professional matches online to learn from the experts.
-
-
Movement. Movement is another essential skill in Counter-Strike: Global Offensive. You need good movement to dodge bullets, peek corners, and position yourself strategically. You can improve your movement by learning how to strafe, crouch, jump, and bunny hop. You can also use movement maps or surf maps to practice.
-
Communication. Communication is a key factor in Counter-Strike: Global Offensive. You need to communicate effectively with your teammates to coordinate your actions, share information, and plan your strategies. You can improve your communication by using a microphone, chat, or voice commands. You can also learn callouts and map names to identify locations.
-
Economy. The economy is a vital aspect of Counter-Strike: Global Offensive. You need to manage your money wisely to buy the best weapons and equipment each round. You can improve your economy by knowing when to save, spend, or eco. You can also learn how much each weapon and item costs and how much money you get for each kill or objective.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descarga De Msica Mp3 Descarga Mod Apk.md b/spaces/Benson/text-generation/Examples/Descarga De Msica Mp3 Descarga Mod Apk.md
deleted file mode 100644
index 360784993e7b20a59b29484a478d5f40ff80d11c..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descarga De Msica Mp3 Descarga Mod Apk.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
Music Downloader MP3 Download Mod APK: How to Download Music for Free
-
Do you love listening to music but hate paying for streaming services or buying albums? Do you want to download your favorite songs and enjoy them offline without ads or interruptions? If you answered yes to these questions, you might be interested in Music Downloader MP3 Download Mod APK. This is a modified version of a popular app that lets you download music files from various sources for free. In this article, we will tell you everything you need to know about this app, including its features, how to install it, how to use it, and its pros and cons.
-
What is Music Downloader MP3 Download Mod APK?
-
Music Downloader MP3 Download Mod APK is a modified version of an app called Free Music Downloader, which is available on the Google Play Store. This app lets you download music files from various sources, such as YouTube, SoundCloud, Spotify, and more. You can choose between MP3 and MP4 formats and adjust the quality to your preference. You can also customize the download settings, such as speed limits and simultaneous downloads. However, the original app has some limitations, such as ads, in-app purchases, and only three downloads at a time. The modified version removes these restrictions and offers you an ad-free music experience.
Features of Music Downloader MP3 Download Mod APK
-
- Download MP3 and MP4 files from various sources
-
With this app, you can download music files from different sources, such as YouTube, SoundCloud, Spotify, and more. You can search for your favorite songs or artists using the app's browser or paste the URL of the source. You can also browse different music categories and genres, such as pop, rock, hip hop, jazz, etc.
-
- Customize the download settings
-
-
- Enjoy an ad-free music experience
-
One of the best features of this app is that it removes all the ads and in-app purchases that are present in the original app. This means you can enjoy your music without interruptions or distractions. You can also save data and battery by avoiding unnecessary ads.
-
How to install Music Downloader MP3 Download Mod APK?
-
If you want to install this app on your device, you need to follow these simple steps:
-
Step 1: Download the APK file from a trusted source
The first thing you need to do is download the Music Downloader MP3 Download Mod APK file from a trusted source. You can find links to the latest version of the app on various websites, such as [APKPure], [APKMirror], or [APKCombo]. Make sure you download the file from a safe and reliable source, and avoid any fake or malicious links.
-
Step 2: Enable unknown sources on your device
-
Before you can install the APK file, you need to enable unknown sources on your device. This allows you to install apps that do not come from the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn the switch on to enable it. You may see a warning message, but don't worry, it is safe to proceed.
-
-
Step 3: Install the APK file and launch the app
-
Now that you have enabled unknown sources, you can install the APK file. Find the file in your device storage and tap on it. You may see a pop-up asking for permissions; just tap install and wait for the process to complete. Once the app is installed, you can launch it from the app drawer or home screen.
-
How to use Music Downloader MP3 Download Mod APK?
-
Using this app is very easy and straightforward. Here are the steps you need to follow:
-
Search for your favorite songs or artists
-
-
Select the file format and quality
-
After finding the song you want to download, tap on it. You will see a pop-up with two options: MP3 and MP4. You can choose between these two formats depending on whether you want audio or video files. You can also select the quality from low to high. The higher the quality, the larger the file size.
-
Tap the download button and wait for the process to complete
-
Once you have selected the format and quality, tap the download button at the bottom of the pop-up. You will see a progress bar showing how long is left until your download completes. You can also pause and resume your downloads at any time. You can access the downloaded files from your device's music or video player, or from the app's download manager.
-
Pros and cons of Music Downloader MP3 Download Mod APK
-
Like any other app, Music Downloader MP3 Download Mod APK has its advantages and disadvantages. Here are some of them:
-
Pros:
-
-
Free and easy to use: You don't have to pay anything or register for anything to use this app. You just download it and start downloading your favorite songs.
-
Supports multiple file formats and quality options: You can choose between MP3 and MP4 formats and adjust the quality to your preference. You can also download music files from various sources, such as YouTube, SoundCloud, Spotify, and more.
-
No ads or interruptions: Unlike the original app, this modified version removes all the ads and in-app purchases present in the original. This means you can enjoy your music without interruptions or distractions.
-
-
Cons:
-
-
Limited to three downloads at a time: One of the limitations of this app is that you can only download three files at once. If you want to download more files simultaneously, you have to wait until one of them finishes.
-
-
May not be compatible with some devices or regions: Finally, this app may not work on some devices or in some regions due to compatibility issues or legal restrictions. Some users have reported that they cannot install or use this app on their devices or in their countries.
-
-
Conclusion
-
If you are looking for a way to download music files for free from various sources, Music Downloader MP3 Download Mod APK may be a good option.
This app lets you download MP3 and MP4 files from various sources, such as YouTube, SoundCloud, Spotify, and more. You can also customize the download settings, such as file format, quality, speed limit, and simultaneous downloads. On top of that, you can enjoy an ad-free music experience with this app. However, the app also has some drawbacks, such as limited downloads, difficulty finding songs, and compatibility issues. Therefore, you should use this app at your own risk and discretion.
-
We hope this article has helped you understand what Music Downloader MP3 Download Mod APK is, how to install it, how to use it, and its pros and cons. If you have any questions or comments, feel free to leave a comment below. Thanks for reading!
-
FAQs
-
Here are some frequently asked questions about Music Downloader MP3 Download Mod APK:
-
-
Is Music Downloader MP3 Download Mod APK safe to use?
-
Music Downloader MP3 Download Mod APK is a modified version of an app that is available on the Google Play Store. However, since it does not come from the official source, it may carry risks or malware. Therefore, you should download it from a trusted source and scan it with an antivirus before installing it. You should also be careful about the sources you download music files from, as they may contain viruses or illegal content.
-
Is Music Downloader MP3 Download Mod APK legal to use?
-
-
How do I update Music Downloader MP3 Download Mod APK?
-
Since Music Downloader MP3 Download Mod APK does not come from the Google Play Store, it cannot be updated automatically or manually from there. You have to download the latest version of the app from a trusted source and install it over the existing one. However, you should back up your downloaded files before updating the app, as they may be deleted or overwritten during the process.
-
How do I uninstall Music Downloader MP3 Download Mod APK?
-
If you want to uninstall Music Downloader MP3 Download Mod APK from your device, you can follow these steps:
-
-
Go to your device settings, then apps, then Music Downloader MP3 Download Mod APK.
-
Tap uninstall and confirm your choice.
-
Delete the APK file from your device storage if you still have it.
-
-
What are some alternatives to Music Downloader MP3 Download Mod APK?
-
If you are looking for alternatives to Music Downloader MP3 Download Mod APK, you can try these apps:
-
-
[YMusic]: This is an app that lets you download music files from YouTube in MP3 format. You can also play YouTube videos in the background with this app.
-
[SnapTube]: This is an app that lets you download music and video files from various sources, such as YouTube, Facebook, Instagram, and more. You can also convert them to different formats and quality options.
-
[VidMate]: This is an app that lets you download music and video files from various sources, such as YouTube, Facebook, Instagram, and more. You can also watch live TV channels and movies with this app.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Ai Robot Perfect World.md b/spaces/Benson/text-generation/Examples/Descargar Ai Robot Perfect World.md
deleted file mode 100644
index 476f4a1deda3844d1f391ba3b3cc70dd614989af..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Ai Robot Perfect World.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-
Download AI Robot Perfect World: How to Enhance Your Gaming Experience with Cobots
-
If you are looking for a fun, immersive, and social online game, you should definitely check out Perfect World. This game is a fantasy MMORPG (massively multiplayer online role-playing game) that lets you explore a vast and beautiful world, create your own character, join forces with other players, and embark on epic quests and adventures. But what if you could make your gaming experience even more enjoyable and convenient with the help of AI Robot? This is a cobot (collaborative robot) that can help you play Perfect World by automating your skills, actions, communication, and more. In this article, we will show you how to download AI Robot Perfect World and set it up, as well as some tips and tricks to optimize your gaming performance with it.
What is Perfect World and why you should play it
-
A brief introduction to the game and its features
-
-
The benefits of playing Perfect World for your mental and social well-being
-
Playing Perfect World is not only fun but also good for your mental and social well-being. Here are some of the benefits you can get from playing this game:
-
-
Stress relief: Playing Perfect World can help you relax and unwind after a long day. You can escape the pressures and worries of reality and immerse yourself in a fantasy world where you can explore, create, and adventure. You can also vent your emotions and frustrations by fighting monsters or enemies or chatting with other players.
-
Creativity: Playing Perfect World can stimulate your imagination and creativity. You can express yourself through your character's appearance, skills, gear, pets, mounts, wings, and so on. You can also design your own house or guild base, or create your own quests or events. You can also learn from other players' creations and draw inspiration from them.
-
Cognitive skills: Playing Perfect World can sharpen cognitive skills such as memory, attention, concentration, problem-solving, logic, strategy, planning, and decision-making, because the game constantly asks you to remember information, pay attention to details, solve problems, apply logic, devise strategies, plan ahead, and make decisions. These skills can help you in your academic, professional, and personal life.
-
-
-
As you can see, playing Perfect World can be good for your mental and social well-being. But how can you enhance your gaming experience? That's where AI Robot comes in.
-
What is AI Robot and how it works
-
A brief introduction to the concept and types of cobots
-
AI Robot is a cobot (collaborative robot) that can help you play Perfect World by automating your skills, actions, communication, and more. A cobot is a robot designed to work alongside humans in a shared environment. Unlike traditional robots, which are programmed to perform specific tasks independently and often require safety barriers or isolation from humans, cobots are flexible, adaptable, and interactive. They can learn from humans and adjust their behavior accordingly. They can also communicate with humans and provide feedback or guidance. Cobots are used in various fields such as manufacturing, healthcare, education, entertainment, and more.
-
There are different types of cobots depending on their functions and features. Some common types are:
-
-
Assistive cobots: These are cobots that help humans perform tasks that are difficult, dangerous, or tedious. For example, a cobot that lifts heavy objects or handles hazardous materials.
-
Augmentative cobots: These are cobots that enhance human capabilities or experiences by providing extra information or functionality. For example, a cobot that projects images or sounds or provides haptic feedback.
-
Autonomous cobots: These are cobots that act independently or semi-independently of humans based on their own goals or rules. For example, a cobot that patrols a building or delivers packages.
-
-
-
AI Robot is a social cobot that can also perform assistive and augmentative functions for Perfect World players.
-
The advantages of using AI Robot for Perfect World players
-
Using AI Robot as a Perfect World player can bring many advantages, such as:
-
-
-
Convenience: AI Robot can make your gaming experience more convenient by automating your skills and actions. You don't have to worry about pressing buttons or clicking the mouse repeatedly. You can sit back and enjoy the game while AI Robot does the work for you.
-
Efficiency: AI Robot can make your gaming experience more efficient by optimizing your skills and actions. You don't have to waste time or resources on unnecessary or ineffective moves. You can rely on AI Robot's intelligent algorithms and strategies to achieve the best results.
-
Communication: AI Robot can make your gaming experience more communicative by improving your communication and collaboration with other players. You don't have to type or speak manually to chat with other players. You can use AI Robot's natural language processing and generation to converse with them fluently and intelligently.
-
Entertainment: AI Robot can make your gaming experience more entertaining by adding fun and humor to your gameplay. You don't have to be bored or serious all the time while playing Perfect World. You can enjoy AI Robot's witty remarks and jokes, which will make you laugh and smile.
-
-
As you can see, using AI Robot as a Perfect World player can be very good for your gaming experience. But how do you download AI Robot Perfect World and set it up? Let's find out in the next section.
-
How to download AI Robot Perfect World and set it up
-
The steps to download and install AI Robot Perfect World from the official website
-
-
-
Go to the official AI Robot Perfect World website at .
-
Click the "Download" button and choose the version that matches your operating system (Windows, Mac, or Linux).
-
Save the file to your computer and run it as administrator.
-
Follow the on-screen instructions to complete the installation process.
-
Launch AI Robot Perfect World and enter your Perfect World account information.
-
-
Congratulations, you have successfully downloaded and installed AI Robot Perfect World from the official website. Now, let's see how to configure and customize AI Robot Perfect World to your preferences.
-
The steps to configure and customize AI Robot Perfect World to your preferences
-
To configure and customize AI Robot Perfect World to your preferences, you need to follow these steps:
-
-
Open AI Robot Perfect World and click the "Settings" icon in the top-right corner.
-
Choose the "General" tab and adjust settings such as language, voice, volume, speed, etc.
-
Choose the "Game" tab and adjust settings such as game mode, skill level, skill set, action set, etc.
-
Choose the "Chat" tab and adjust settings such as chat mode, chat style, chat topics, chat filters, etc.
-
Choose the "Fun" tab and adjust settings such as humor level, humor type, humor frequency, etc.
-
Click the "Save" button to apply your changes.
-
-
Congratulations, you have successfully configured and customized AI Robot Perfect World to your preferences. Now, let's look at some tips and tricks to optimize your gaming performance with AI Robot Perfect World.
-
Tips and tricks to optimize your gaming performance with AI Robot Perfect World
-
How to use AI Robot Perfect World to automate your skills and actions
-
-
-
Open AI Robot Perfect World and click the "Game" icon in the top-left corner.
-
Select the game mode you want to play (PvE or PvP).
-
Select the skill level you want AI Robot Perfect World to play at (easy, normal, hard, or expert).
-
Select the skill set you want AI Robot Perfect World to use for your character (basic, advanced, or custom).
-
Select the action set you want AI Robot Perfect World to use for your character (basic, advanced, or custom).
-
Click the "Start" button to let AI Robot Perfect World take over your skills and actions.
-
-
Congratulations, you have successfully used AI Robot Perfect World to automate your skills and actions. Now you can watch AI Robot Perfect World play for you or jump in whenever you want. You can also pause or stop AI Robot Perfect World at any time by clicking the "Pause" or "Stop" button.
-
How to use AI Robot Perfect World to improve your communication and collaboration with other players
-
To use AI Robot Perfect World to improve your communication and collaboration with other players, you need to follow these steps:
-
-
Open AI Robot Perfect World and click the "Chat" icon in the top-right corner.
-
Select the chat mode you want to use (Auto or Manual).
-
If you choose Auto mode, AI Robot Perfect World will automatically chat with other players based on the chat style, chat topics, chat filters, etc. that you set in the settings. You can also step in or override AI Robot Perfect World at any time by typing or speaking manually.
-
If you choose Manual mode, you can type or speak manually to chat with other players. You can also ask AI Robot Perfect World for suggestions or assistance by typing or saying "@AI" followed by your request. For example, "@AI How can I complete this quest?" or "@AI Tell me a joke."
-
-
-
Congratulations, you have successfully used AI Robot Perfect World to improve your communication and collaboration with other players. Now you can enjoy chatting with other players more easily and effectively with the help of AI Robot Perfect World.
-
Conclusion
-
A summary of the main points and a call to action
-
In conclusion, we have shown you how to download and set up AI Robot Perfect World, along with some tips and tricks for optimizing your gaming performance with it. AI Robot Perfect World is a cobot that can help you play Perfect World by automating your skills, actions, communication, and more. It can also make your gaming experience more convenient, efficient, communicative, and entertaining. By using AI Robot Perfect World, you can improve your gaming experience and enjoy Perfect World more than ever. If you are interested in trying AI Robot Perfect World, you can download it from the official website at . You can also visit the website for more information, tutorials, support, feedback, and updates. And you can join the AI Robot Perfect World community on social media platforms such as Facebook, Twitter, Instagram, YouTube, etc. to connect with other players and share your experiences. We hope this article has been helpful and informative for you. If you have any questions or comments, feel free to leave them below. We would love to hear from you. Thank you for reading, and happy gaming!
FAQs
-
Q: Is AI Robot Perfect World safe and legal to use?
-
A: Yes, AI Robot Perfect World is safe and legal to use. It does not contain any viruses, malware, spyware, or other harmful components. It also does not violate any of Perfect World's terms of service or rules. It is a legitimate, authorized cobot designed to help players enjoy the game more.
-
Q: How much does AI Robot Perfect World cost?
-
-
Q: Can I use AI Robot Perfect World with other games?
-
A: No, AI Robot Perfect World is only compatible with Perfect World. It is not designed to work with other games. If you want to use a cobot for other games, you will need to look for cobots made specifically for those games.
-
Q: Can I use AI Robot Perfect World on multiple devices?
-
A: Yes, you can use AI Robot Perfect World on multiple devices as long as they run the same operating system (Windows, Mac, or Linux). You just need to download and install AI Robot Perfect World on each device and sign in with your Perfect World account information.
-
Q: How can I contact the developers of AI Robot Perfect World?
-
A: You can contact the developers of AI Robot Perfect World by sending an email to . You can also visit their website at for more contact information.
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/self_outdated_check.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/self_outdated_check.py
deleted file mode 100644
index 41cc42c5677ddf0709d9eeb894eb8dbe4fd16f91..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/self_outdated_check.py
+++ /dev/null
@@ -1,242 +0,0 @@
-import datetime
-import functools
-import hashlib
-import json
-import logging
-import optparse
-import os.path
-import sys
-from dataclasses import dataclass
-from typing import Any, Callable, Dict, Optional
-
-from pip._vendor.packaging.version import parse as parse_version
-from pip._vendor.rich.console import Group
-from pip._vendor.rich.markup import escape
-from pip._vendor.rich.text import Text
-
-from pip._internal.index.collector import LinkCollector
-from pip._internal.index.package_finder import PackageFinder
-from pip._internal.metadata import get_default_environment
-from pip._internal.metadata.base import DistributionVersion
-from pip._internal.models.selection_prefs import SelectionPreferences
-from pip._internal.network.session import PipSession
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.entrypoints import (
- get_best_invocation_for_this_pip,
- get_best_invocation_for_this_python,
-)
-from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace
-from pip._internal.utils.misc import ensure_dir
-
-_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
-
-
-logger = logging.getLogger(__name__)
-
-
-def _get_statefile_name(key: str) -> str:
- key_bytes = key.encode()
- name = hashlib.sha224(key_bytes).hexdigest()
- return name
-
-
-class SelfCheckState:
- def __init__(self, cache_dir: str) -> None:
- self._state: Dict[str, Any] = {}
- self._statefile_path = None
-
- # Try to load the existing state
- if cache_dir:
- self._statefile_path = os.path.join(
- cache_dir, "selfcheck", _get_statefile_name(self.key)
- )
- try:
- with open(self._statefile_path, encoding="utf-8") as statefile:
- self._state = json.load(statefile)
- except (OSError, ValueError, KeyError):
- # Explicitly suppressing exceptions, since we don't want to
- # error out if the cache file is invalid.
- pass
-
- @property
- def key(self) -> str:
- return sys.prefix
-
- def get(self, current_time: datetime.datetime) -> Optional[str]:
- """Check if we have a not-outdated version loaded already."""
- if not self._state:
- return None
-
- if "last_check" not in self._state:
- return None
-
- if "pypi_version" not in self._state:
- return None
-
- seven_days_in_seconds = 7 * 24 * 60 * 60
-
- # Determine if we need to refresh the state
- last_check = datetime.datetime.strptime(self._state["last_check"], _DATE_FMT)
- seconds_since_last_check = (current_time - last_check).total_seconds()
- if seconds_since_last_check > seven_days_in_seconds:
- return None
-
- return self._state["pypi_version"]
-
- def set(self, pypi_version: str, current_time: datetime.datetime) -> None:
- # If we do not have a path to cache in, don't bother saving.
- if not self._statefile_path:
- return
-
- # Check to make sure that we own the directory
- if not check_path_owner(os.path.dirname(self._statefile_path)):
- return
-
- # Now that we've ensured the directory is owned by this user, we'll go
- # ahead and make sure that all our directories are created.
- ensure_dir(os.path.dirname(self._statefile_path))
-
- state = {
- # Include the key so it's easy to tell which pip wrote the
- # file.
- "key": self.key,
- "last_check": current_time.strftime(_DATE_FMT),
- "pypi_version": pypi_version,
- }
-
- text = json.dumps(state, sort_keys=True, separators=(",", ":"))
-
- with adjacent_tmp_file(self._statefile_path) as f:
- f.write(text.encode())
-
- try:
- # Since we have a prefix-specific state file, we can just
- # overwrite whatever is there, no need to check.
- replace(f.name, self._statefile_path)
- except OSError:
- # Best effort.
- pass
-
-
-@dataclass
-class UpgradePrompt:
- old: str
- new: str
-
- def __rich__(self) -> Group:
- if WINDOWS:
- pip_cmd = f"{get_best_invocation_for_this_python()} -m pip"
- else:
- pip_cmd = get_best_invocation_for_this_pip()
-
- notice = "[bold][[reset][blue]notice[reset][bold]][reset]"
- return Group(
- Text(),
- Text.from_markup(
- f"{notice} A new release of pip is available: "
- f"[red]{self.old}[reset] -> [green]{self.new}[reset]"
- ),
- Text.from_markup(
- f"{notice} To update, run: "
- f"[green]{escape(pip_cmd)} install --upgrade pip"
- ),
- )
-
-
-def was_installed_by_pip(pkg: str) -> bool:
- """Checks whether pkg was installed by pip
-
-    This is used to avoid displaying the upgrade message when pip was in
-    fact installed by a system package manager, such as dnf on Fedora.
- """
- dist = get_default_environment().get_distribution(pkg)
- return dist is not None and "pip" == dist.installer
-
-
-def _get_current_remote_pip_version(
- session: PipSession, options: optparse.Values
-) -> Optional[str]:
-    # Let's use PackageFinder to see what the latest pip version is
- link_collector = LinkCollector.create(
- session,
- options=options,
- suppress_no_index=True,
- )
-
- # Pass allow_yanked=False so we don't suggest upgrading to a
- # yanked version.
- selection_prefs = SelectionPreferences(
- allow_yanked=False,
- allow_all_prereleases=False, # Explicitly set to False
- )
-
- finder = PackageFinder.create(
- link_collector=link_collector,
- selection_prefs=selection_prefs,
- )
- best_candidate = finder.find_best_candidate("pip").best_candidate
- if best_candidate is None:
- return None
-
- return str(best_candidate.version)
-
-
-def _self_version_check_logic(
- *,
- state: SelfCheckState,
- current_time: datetime.datetime,
- local_version: DistributionVersion,
- get_remote_version: Callable[[], Optional[str]],
-) -> Optional[UpgradePrompt]:
- remote_version_str = state.get(current_time)
- if remote_version_str is None:
- remote_version_str = get_remote_version()
- if remote_version_str is None:
- logger.debug("No remote pip version found")
- return None
- state.set(remote_version_str, current_time)
-
- remote_version = parse_version(remote_version_str)
- logger.debug("Remote version of pip: %s", remote_version)
- logger.debug("Local version of pip: %s", local_version)
-
- pip_installed_by_pip = was_installed_by_pip("pip")
- logger.debug("Was pip installed by pip? %s", pip_installed_by_pip)
- if not pip_installed_by_pip:
- return None # Only suggest upgrade if pip is installed by pip.
-
- local_version_is_older = (
- local_version < remote_version
- and local_version.base_version != remote_version.base_version
- )
- if local_version_is_older:
- return UpgradePrompt(old=str(local_version), new=remote_version_str)
-
- return None
-
-
-def pip_self_version_check(session: PipSession, options: optparse.Values) -> None:
- """Check for an update for pip.
-
- Limit the frequency of checks to once per week. State is stored either in
- the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
- of the pip script path.
- """
- installed_dist = get_default_environment().get_distribution("pip")
- if not installed_dist:
- return
-
- try:
- upgrade_prompt = _self_version_check_logic(
- state=SelfCheckState(cache_dir=options.cache_dir),
- current_time=datetime.datetime.utcnow(),
- local_version=installed_dist.version,
- get_remote_version=functools.partial(
- _get_current_remote_pip_version, session, options
- ),
- )
- if upgrade_prompt is not None:
- logger.warning("[present-rich] %s", upgrade_prompt)
- except Exception:
- logger.warning("There was an error checking the latest version of pip.")
- logger.debug("See below for error", exc_info=True)
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/nap.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/nap.py
deleted file mode 100644
index 72aa5bfd4b60d8e6ef6ed0cf2ae4f763d12195cc..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/tenacity/nap.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 2016 Étienne Bersac
-# Copyright 2016 Julien Danjou
-# Copyright 2016 Joshua Harlow
-# Copyright 2013-2014 Ray Holder
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import time
-import typing
-
-if typing.TYPE_CHECKING:
- import threading
-
-
-def sleep(seconds: float) -> None:
- """
- Sleep strategy that delays execution for a given number of seconds.
-
- This is the default strategy, and may be mocked out for unit testing.
- """
- time.sleep(seconds)
-
-
-class sleep_using_event:
- """Sleep strategy that waits on an event to be set."""
-
- def __init__(self, event: "threading.Event") -> None:
- self.event = event
-
- def __call__(self, timeout: typing.Optional[float]) -> None:
- # NOTE(harlowja): this may *not* actually wait for timeout
- # seconds if the event is set (ie this may eject out early).
- self.event.wait(timeout=timeout)
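A small usage sketch for the event-based strategy above: passing a `threading.Event` lets another thread cut a pending retry sleep short. (`tenacity.nap.sleep_using_event` is the same class in the upstream tenacity package; the surrounding retry loop is assumed, not shown.)

```python
import threading

from tenacity.nap import sleep_using_event

stop_event = threading.Event()
sleeper = sleep_using_event(stop_event)

# Blocks for up to 5 seconds between retries, but returns immediately
# once another thread calls stop_event.set() (e.g. during shutdown).
sleeper(5.0)
```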
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py
deleted file mode 100644
index 898644755cbbf9a8d4df562663114a7eb7e11fd1..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import railroad
-import pyparsing
-import typing
-from typing import (
- List,
- NamedTuple,
- Generic,
- TypeVar,
- Dict,
- Callable,
- Set,
- Iterable,
-)
-from jinja2 import Template
-from io import StringIO
-import inspect
-
-
-jinja2_template_source = """\
-<!DOCTYPE html>
-<html>
-<head>
-    {% if not head %}
-        <style>
-            .railroad-heading {
-                font-family: monospace;
-            }
-        </style>
-    {% else %}
-    {{ head | safe }}
-    {% endif %}
-</head>
-<body>
-{{ body | safe }}
-{% for diagram in diagrams %}
-    <div class="railroad-group">
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div>{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
-    </div>
-{% endfor %}
-</body>
-</html>
-"""
-
-template = Template(jinja2_template_source)
-
-# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
-NamedDiagram = NamedTuple(
- "NamedDiagram",
- [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
-)
-"""
-A simple structure for associating a name with a railroad diagram
-"""
-
-T = TypeVar("T")
-
-
-class EachItem(railroad.Group):
- """
- Custom railroad item to compose a:
- - Group containing a
- - OneOrMore containing a
- - Choice of the elements in the Each
- with the group label indicating that all must be matched
- """
-
- all_label = "[ALL]"
-
- def __init__(self, *items):
- choice_item = railroad.Choice(len(items) - 1, *items)
- one_or_more_item = railroad.OneOrMore(item=choice_item)
- super().__init__(one_or_more_item, label=self.all_label)
-
-
-class AnnotatedItem(railroad.Group):
- """
- Simple subclass of Group that creates an annotation label
- """
-
- def __init__(self, label: str, item):
- super().__init__(item=item, label="[{}]".format(label) if label else label)
-
-
-class EditablePartial(Generic[T]):
- """
- Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
- constructed.
- """
-
- # We need this here because the railroad constructors actually transform the data, so can't be called until the
- # entire tree is assembled
-
- def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
- self.func = func
- self.args = args
- self.kwargs = kwargs
-
- @classmethod
- def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
- """
- If you call this function in the same way that you would call the constructor, it will store the arguments
- as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
- """
- return EditablePartial(func=func, args=list(args), kwargs=kwargs)
-
- @property
- def name(self):
- return self.kwargs["name"]
-
- def __call__(self) -> T:
- """
- Evaluate the partial and return the result
- """
- args = self.args.copy()
- kwargs = self.kwargs.copy()
-
- # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
- # args=['list', 'of', 'things'])
- arg_spec = inspect.getfullargspec(self.func)
- if arg_spec.varargs in self.kwargs:
- args += kwargs.pop(arg_spec.varargs)
-
- return self.func(*args, **kwargs)
-
-
-def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
- """
- Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
-    :param kwargs: kwargs to be passed to the template
- """
- data = []
- for diagram in diagrams:
- if diagram.diagram is None:
- continue
- io = StringIO()
- diagram.diagram.writeSvg(io.write)
- title = diagram.name
- if diagram.index == 0:
- title += " (root)"
- data.append({"title": title, "text": "", "svg": io.getvalue()})
-
- return template.render(diagrams=data, **kwargs)
-
-
-def resolve_partial(partial: "EditablePartial[T]") -> T:
- """
- Recursively resolves a collection of Partials into whatever type they are
- """
- if isinstance(partial, EditablePartial):
- partial.args = resolve_partial(partial.args)
- partial.kwargs = resolve_partial(partial.kwargs)
- return partial()
- elif isinstance(partial, list):
- return [resolve_partial(x) for x in partial]
- elif isinstance(partial, dict):
- return {key: resolve_partial(x) for key, x in partial.items()}
- else:
- return partial
-
-
-def to_railroad(
- element: pyparsing.ParserElement,
- diagram_kwargs: typing.Optional[dict] = None,
- vertical: int = 3,
- show_results_names: bool = False,
- show_groups: bool = False,
-) -> List[NamedDiagram]:
- """
- Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
- creation if you want to access the Railroad tree before it is converted to HTML
- :param element: base element of the parser being diagrammed
- :param diagram_kwargs: kwargs to pass to the Diagram() constructor
- :param vertical: (optional) - int - limit at which number of alternatives should be
- shown vertically instead of horizontally
-    :param show_results_names: bool to indicate whether results name annotations should be
-    included in the diagram
-    :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
-    surrounding box
- """
- # Convert the whole tree underneath the root
- lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
- _to_diagram_element(
- element,
- lookup=lookup,
- parent=None,
- vertical=vertical,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- root_id = id(element)
- # Convert the root if it hasn't been already
- if root_id in lookup:
- if not element.customName:
- lookup[root_id].name = ""
- lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
-
- # Now that we're finished, we can convert from intermediate structures into Railroad elements
- diags = list(lookup.diagrams.values())
- if len(diags) > 1:
- # collapse out duplicate diags with the same name
- seen = set()
- deduped_diags = []
- for d in diags:
- # don't extract SkipTo elements, they are uninformative as subdiagrams
- if d.name == "...":
- continue
- if d.name is not None and d.name not in seen:
- seen.add(d.name)
- deduped_diags.append(d)
- resolved = [resolve_partial(partial) for partial in deduped_diags]
- else:
- # special case - if just one diagram, always display it, even if
- # it has no name
- resolved = [resolve_partial(partial) for partial in diags]
- return sorted(resolved, key=lambda diag: diag.index)
-
-
-def _should_vertical(
- specification: int, exprs: Iterable[pyparsing.ParserElement]
-) -> bool:
- """
- Returns true if we should return a vertical list of elements
- """
- if specification is None:
- return False
- else:
- return len(_visible_exprs(exprs)) >= specification
-
-
-class ElementState:
- """
- State recorded for an individual pyparsing Element
- """
-
- # Note: this should be a dataclass, but we have to support Python 3.5
- def __init__(
- self,
- element: pyparsing.ParserElement,
- converted: EditablePartial,
- parent: EditablePartial,
- number: int,
- name: str = None,
- parent_index: typing.Optional[int] = None,
- ):
- #: The pyparsing element that this represents
- self.element: pyparsing.ParserElement = element
- #: The name of the element
- self.name: typing.Optional[str] = name
- #: The output Railroad element in an unconverted state
- self.converted: EditablePartial = converted
- #: The parent Railroad element, which we store so that we can extract this if it's duplicated
- self.parent: EditablePartial = parent
- #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
- self.number: int = number
- #: The index of this inside its parent
- self.parent_index: typing.Optional[int] = parent_index
- #: If true, we should extract this out into a subdiagram
- self.extract: bool = False
- #: If true, all of this element's children have been filled out
- self.complete: bool = False
-
- def mark_for_extraction(
- self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
- ):
- """
- Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
- :param el_id: id of the element
- :param state: element/diagram state tracker
- :param name: name to use for this element's text
- :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
- root element when we know we're finished
- """
- self.extract = True
-
- # Set the name
- if not self.name:
- if name:
- # Allow forcing a custom name
- self.name = name
- elif self.element.customName:
- self.name = self.element.customName
- else:
- self.name = ""
-
- # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
- # to be added
- # Also, if this is just a string literal etc, don't bother extracting it
- if force or (self.complete and _worth_extracting(self.element)):
- state.extract_into_diagram(el_id)
-
-
-class ConverterState:
- """
- Stores some state that persists between recursions into the element tree
- """
-
- def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
- #: A dictionary mapping ParserElements to state relating to them
- self._element_diagram_states: Dict[int, ElementState] = {}
- #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
- self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
- #: The index of the next unnamed element
- self.unnamed_index: int = 1
- #: The index of the next element. This is used for sorting
- self.index: int = 0
- #: Shared kwargs that are used to customize the construction of diagrams
- self.diagram_kwargs: dict = diagram_kwargs or {}
- self.extracted_diagram_names: Set[str] = set()
-
- def __setitem__(self, key: int, value: ElementState):
- self._element_diagram_states[key] = value
-
- def __getitem__(self, key: int) -> ElementState:
- return self._element_diagram_states[key]
-
- def __delitem__(self, key: int):
- del self._element_diagram_states[key]
-
- def __contains__(self, key: int):
- return key in self._element_diagram_states
-
- def generate_unnamed(self) -> int:
- """
- Generate a number used in the name of an otherwise unnamed diagram
- """
- self.unnamed_index += 1
- return self.unnamed_index
-
- def generate_index(self) -> int:
- """
- Generate a number used to index a diagram
- """
- self.index += 1
- return self.index
-
- def extract_into_diagram(self, el_id: int):
- """
- Used when we encounter the same token twice in the same tree. When this
- happens, we replace all instances of that token with a terminal, and
- create a new subdiagram for the token
- """
- position = self[el_id]
-
- # Replace the original definition of this element with a regular block
- if position.parent:
- ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
- if "item" in position.parent.kwargs:
- position.parent.kwargs["item"] = ret
- elif "items" in position.parent.kwargs:
- position.parent.kwargs["items"][position.parent_index] = ret
-
- # If the element we're extracting is a group, skip to its content but keep the title
- if position.converted.func == railroad.Group:
- content = position.converted.kwargs["item"]
- else:
- content = position.converted
-
- self.diagrams[el_id] = EditablePartial.from_call(
- NamedDiagram,
- name=position.name,
- diagram=EditablePartial.from_call(
- railroad.Diagram, content, **self.diagram_kwargs
- ),
- index=position.number,
- )
-
- del self[el_id]
-
-
-def _worth_extracting(element: pyparsing.ParserElement) -> bool:
- """
-    Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
-    themselves have children, then it's complex enough to extract
- """
- children = element.recurse()
- return any(child.recurse() for child in children)
-
-
-def _apply_diagram_item_enhancements(fn):
- """
- decorator to ensure enhancements to a diagram item (such as results name annotations)
- get applied on return from _to_diagram_element (we do this since there are several
- returns in _to_diagram_element)
- """
-
- def _inner(
- element: pyparsing.ParserElement,
- parent: typing.Optional[EditablePartial],
- lookup: ConverterState = None,
- vertical: int = None,
- index: int = 0,
- name_hint: str = None,
- show_results_names: bool = False,
- show_groups: bool = False,
- ) -> typing.Optional[EditablePartial]:
-
- ret = fn(
- element,
- parent,
- lookup,
- vertical,
- index,
- name_hint,
- show_results_names,
- show_groups,
- )
-
- # apply annotation for results name, if present
- if show_results_names and ret is not None:
- element_results_name = element.resultsName
- if element_results_name:
- # add "*" to indicate if this is a "list all results" name
- element_results_name += "" if element.modalResults else "*"
- ret = EditablePartial.from_call(
- railroad.Group, item=ret, label=element_results_name
- )
-
- return ret
-
- return _inner
-
-
-def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
- non_diagramming_exprs = (
- pyparsing.ParseElementEnhance,
- pyparsing.PositionToken,
- pyparsing.And._ErrorStop,
- )
- return [
- e
- for e in exprs
- if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
- ]
-
-
-@_apply_diagram_item_enhancements
-def _to_diagram_element(
- element: pyparsing.ParserElement,
- parent: typing.Optional[EditablePartial],
- lookup: ConverterState = None,
- vertical: int = None,
- index: int = 0,
- name_hint: str = None,
- show_results_names: bool = False,
- show_groups: bool = False,
-) -> typing.Optional[EditablePartial]:
- """
- Recursively converts a PyParsing Element to a railroad Element
- :param lookup: The shared converter state that keeps track of useful things
- :param index: The index of this element within the parent
- :param parent: The parent of this element in the output tree
- :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
- it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
- do so
- :param name_hint: If provided, this will override the generated name
- :param show_results_names: bool flag indicating whether to add annotations for results names
- :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
- :param show_groups: bool flag indicating whether to show groups using bounding box
- """
- exprs = element.recurse()
- name = name_hint or element.customName or element.__class__.__name__
-
- # Python's id() is used to provide a unique identifier for elements
- el_id = id(element)
-
- element_results_name = element.resultsName
-
- # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
- if not element.customName:
- if isinstance(
- element,
- (
- # pyparsing.TokenConverter,
- # pyparsing.Forward,
- pyparsing.Located,
- ),
- ):
- # However, if this element has a useful custom name, and its child does not, we can pass it on to the child
- if exprs:
- if not exprs[0].customName:
- propagated_name = name
- else:
- propagated_name = None
-
- return _to_diagram_element(
- element.expr,
- parent=parent,
- lookup=lookup,
- vertical=vertical,
- index=index,
- name_hint=propagated_name,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
-    # If the element isn't worth extracting, we always treat it as the first time we see it
- if _worth_extracting(element):
- if el_id in lookup:
- # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
- # so we have to extract it into a new diagram.
- looked_up = lookup[el_id]
- looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
- ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
- return ret
-
- elif el_id in lookup.diagrams:
- # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
- # just put in a marker element that refers to the sub-diagram
- ret = EditablePartial.from_call(
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
- )
- return ret
-
- # Recursively convert child elements
- # Here we find the most relevant Railroad element for matching pyparsing Element
- # We use ``items=[]`` here to hold the place for where the child elements will go once created
- if isinstance(element, pyparsing.And):
- # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
- # (all will have the same name, and resultsName)
- if not exprs:
- return None
- if len(set((e.name, e.resultsName) for e in exprs)) == 1:
- ret = EditablePartial.from_call(
- railroad.OneOrMore, item="", repeat=str(len(exprs))
- )
- elif _should_vertical(vertical, exprs):
- ret = EditablePartial.from_call(railroad.Stack, items=[])
- else:
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
- elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
- if not exprs:
- return None
- if _should_vertical(vertical, exprs):
- ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
- else:
- ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
- elif isinstance(element, pyparsing.Each):
- if not exprs:
- return None
- ret = EditablePartial.from_call(EachItem, items=[])
- elif isinstance(element, pyparsing.NotAny):
- ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
- elif isinstance(element, pyparsing.FollowedBy):
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
- elif isinstance(element, pyparsing.PrecededBy):
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
- elif isinstance(element, pyparsing.Group):
- if show_groups:
- ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
- else:
- ret = EditablePartial.from_call(railroad.Group, label="", item="")
- elif isinstance(element, pyparsing.TokenConverter):
- ret = EditablePartial.from_call(
- AnnotatedItem, label=type(element).__name__.lower(), item=""
- )
- elif isinstance(element, pyparsing.Opt):
- ret = EditablePartial.from_call(railroad.Optional, item="")
- elif isinstance(element, pyparsing.OneOrMore):
- ret = EditablePartial.from_call(railroad.OneOrMore, item="")
- elif isinstance(element, pyparsing.ZeroOrMore):
- ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
- elif isinstance(element, pyparsing.Group):
- ret = EditablePartial.from_call(
- railroad.Group, item=None, label=element_results_name
- )
- elif isinstance(element, pyparsing.Empty) and not element.customName:
- # Skip unnamed "Empty" elements
- ret = None
- elif len(exprs) > 1:
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
- elif len(exprs) > 0 and not element_results_name:
- ret = EditablePartial.from_call(railroad.Group, item="", label=name)
- else:
- terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
- ret = terminal
-
- if ret is None:
- return
-
- # Indicate this element's position in the tree so we can extract it if necessary
- lookup[el_id] = ElementState(
- element=element,
- converted=ret,
- parent=parent,
- parent_index=index,
- number=lookup.generate_index(),
- )
- if element.customName:
- lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
-
- i = 0
- for expr in exprs:
- # Add a placeholder index in case we have to extract the child before we even add it to the parent
- if "items" in ret.kwargs:
- ret.kwargs["items"].insert(i, None)
-
- item = _to_diagram_element(
- expr,
- parent=ret,
- lookup=lookup,
- vertical=vertical,
- index=i,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- # Some elements don't need to be shown in the diagram
- if item is not None:
- if "item" in ret.kwargs:
- ret.kwargs["item"] = item
- elif "items" in ret.kwargs:
- # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
- ret.kwargs["items"][i] = item
- i += 1
- elif "items" in ret.kwargs:
- # If we're supposed to skip this element, remove it from the parent
- del ret.kwargs["items"][i]
-
-    # If all of this item's children are None, skip this item
- if ret and (
- ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
- or ("item" in ret.kwargs and ret.kwargs["item"] is None)
- ):
- ret = EditablePartial.from_call(railroad.Terminal, name)
-
- # Mark this element as "complete", ie it has all of its children
- if el_id in lookup:
- lookup[el_id].complete = True
-
- if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
- lookup.extract_into_diagram(el_id)
- if ret is not None:
- ret = EditablePartial.from_call(
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
- )
-
- return ret
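As a usage sketch for the module above (it needs the `railroad-diagrams` and `jinja2` extras installed): `to_railroad` converts a pyparsing grammar into `NamedDiagram`s, and `railroad_to_html` renders them into a standalone HTML page. The toy grammar and output file name are illustrative:

```python
import pyparsing as pp
from pyparsing.diagram import to_railroad, railroad_to_html

# A toy grammar: a greeting like "Hello, World !"
greeting = pp.Word(pp.alphas)("salutation") + "," + pp.Word(pp.alphas)("name") + "!"

diagrams = to_railroad(greeting, show_results_names=True)
html = railroad_to_html(diagrams)

with open("greeting_diagram.html", "w", encoding="utf-8") as f:
    f.write(html)
```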
diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/analysis.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/analysis.py
deleted file mode 100644
index 3bc5e06e31520f82f3b0c5e3213c2e5cf8858057..0000000000000000000000000000000000000000
--- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/analysis.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# -*- coding: utf-8 -*-
-
-import logging
-import typing
-import torch
-from fvcore.nn import activation_count, flop_count, parameter_count, parameter_count_table
-from torch import nn
-
-from .logger import log_first_n
-
-__all__ = [
- "activation_count_operators",
- "flop_count_operators",
- "parameter_count_table",
- "parameter_count",
-]
-
-FLOPS_MODE = "flops"
-ACTIVATIONS_MODE = "activations"
-
-
-def flop_count_operators(
- model: nn.Module, inputs: list, **kwargs
-) -> typing.DefaultDict[str, float]:
- """
- Implement operator-level flops counting using jit.
-    This is a wrapper of fvcore.nn.flop_count that supports standard detection models
-    in detectron2.
-
-    Note:
-        The function runs the input through the model to compute flops.
-        The flops of a detection model are often input-dependent: for example,
-        the flops of the box & mask heads depend on the number of proposals &
-        the number of detected objects.
-        Therefore, counting flops with a single input may not accurately
-        reflect the computation cost of the model.
-
- Args:
- model: a detectron2 model that takes `list[dict]` as input.
- inputs (list[dict]): inputs to model, in detectron2's standard format.
- """
- return _wrapper_count_operators(model=model, inputs=inputs, mode=FLOPS_MODE, **kwargs)
-
-
-def activation_count_operators(
- model: nn.Module, inputs: list, **kwargs
-) -> typing.DefaultDict[str, float]:
- """
- Implement operator-level activations counting using jit.
-    This is a wrapper of fvcore.nn.activation_count that supports standard detection models
-    in detectron2.
-
-    Note:
-        The function runs the input through the model to compute activations.
-        The activations of a detection model are often input-dependent: for example,
-        the activations of the box & mask heads depend on the number of proposals &
-        the number of detected objects.
-
- Args:
- model: a detectron2 model that takes `list[dict]` as input.
- inputs (list[dict]): inputs to model, in detectron2's standard format.
- """
- return _wrapper_count_operators(model=model, inputs=inputs, mode=ACTIVATIONS_MODE, **kwargs)
-
-
-def _wrapper_count_operators(
- model: nn.Module, inputs: list, mode: str, **kwargs
-) -> typing.DefaultDict[str, float]:
-
- assert len(inputs) == 1, "Please use batch size=1"
- tensor_input = inputs[0]["image"]
-
- class WrapModel(nn.Module):
- def __init__(self, model):
- super().__init__()
- if isinstance(
- model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)
- ):
- self.model = model.module
- else:
- self.model = model
-
- def forward(self, image):
- # jit requires the input/output to be Tensors
- inputs = [{"image": image}]
- outputs = self.model.forward(inputs)[0]
- if isinstance(outputs, dict) and "instances" in outputs:
- # Only the subgraph that computes the returned tensor will be
- # counted. So we return everything we found in Instances.
- inst = outputs["instances"]
- ret = [inst.pred_boxes.tensor]
- inst.remove("pred_boxes")
- for k, v in inst.get_fields().items():
- if isinstance(v, torch.Tensor):
- ret.append(v)
- else:
- log_first_n(
- logging.WARN,
- f"Field '{k}' in output instances is not included"
- " in flops/activations count.",
- n=10,
- )
- return tuple(ret)
- raise NotImplementedError("Count for segmentation models is not supported yet.")
-
- old_train = model.training
- with torch.no_grad():
- if mode == FLOPS_MODE:
- ret = flop_count(WrapModel(model).train(False), (tensor_input,), **kwargs)
- elif mode == ACTIVATIONS_MODE:
- ret = activation_count(WrapModel(model).train(False), (tensor_input,), **kwargs)
- else:
- raise NotImplementedError("Count for mode {} is not supported yet.".format(mode))
- # compatible with change in fvcore
- if isinstance(ret, tuple):
- ret = ret[0]
- model.train(old_train)
- return ret
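A hedged usage sketch for `flop_count_operators`: it expects a detectron2 model and a single-sample input list in detectron2's standard format, and returns per-operator counts (in the units reported by fvcore's `flop_count`, and input-dependent as the docstring warns). The model below is a placeholder assumption, not built here:

```python
import torch

# Assumption: `model` is a detectron2 model built elsewhere, e.g. with
# detectron2.modeling.build_model(cfg); it is only a placeholder in this sketch.
inputs = [{"image": torch.randn(3, 800, 800)}]  # batch size must be 1

flops = flop_count_operators(model, inputs)
for op, count in sorted(flops.items(), key=lambda kv: -kv[1]):
    print(f"{op}: {count:.2f}")
print("total:", sum(flops.values()))
```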
diff --git a/spaces/CVPR/GFPGAN-example/gfpgan/models/gfpgan_model.py b/spaces/CVPR/GFPGAN-example/gfpgan/models/gfpgan_model.py
deleted file mode 100644
index c3d51b0b80f775c48d8c1701727b871b1f99e02c..0000000000000000000000000000000000000000
--- a/spaces/CVPR/GFPGAN-example/gfpgan/models/gfpgan_model.py
+++ /dev/null
@@ -1,580 +0,0 @@
-import math
-import os.path as osp
-import torch
-from basicsr.archs import build_network
-from basicsr.losses import build_loss
-from basicsr.losses.losses import r1_penalty
-from basicsr.metrics import calculate_metric
-from basicsr.models.base_model import BaseModel
-from basicsr.utils import get_root_logger, imwrite, tensor2img
-from basicsr.utils.registry import MODEL_REGISTRY
-from collections import OrderedDict
-from torch.nn import functional as F
-from torchvision.ops import roi_align
-from tqdm import tqdm
-
-
-@MODEL_REGISTRY.register()
-class GFPGANModel(BaseModel):
- """The GFPGAN model for Towards real-world blind face restoratin with generative facial prior"""
-
- def __init__(self, opt):
- super(GFPGANModel, self).__init__(opt)
- self.idx = 0 # it is used for saving data for check
-
- # define network
- self.net_g = build_network(opt['network_g'])
- self.net_g = self.model_to_device(self.net_g)
- self.print_network(self.net_g)
-
- # load pretrained model
- load_path = self.opt['path'].get('pretrain_network_g', None)
- if load_path is not None:
- param_key = self.opt['path'].get('param_key_g', 'params')
- self.load_network(self.net_g, load_path, self.opt['path'].get('strict_load_g', True), param_key)
-
- self.log_size = int(math.log(self.opt['network_g']['out_size'], 2))
-
- if self.is_train:
- self.init_training_settings()
-
- def init_training_settings(self):
- train_opt = self.opt['train']
-
- # ----------- define net_d ----------- #
- self.net_d = build_network(self.opt['network_d'])
- self.net_d = self.model_to_device(self.net_d)
- self.print_network(self.net_d)
- # load pretrained model
- load_path = self.opt['path'].get('pretrain_network_d', None)
- if load_path is not None:
- self.load_network(self.net_d, load_path, self.opt['path'].get('strict_load_d', True))
-
- # ----------- define net_g with Exponential Moving Average (EMA) ----------- #
- # net_g_ema only used for testing on one GPU and saving. There is no need to wrap with DistributedDataParallel
- self.net_g_ema = build_network(self.opt['network_g']).to(self.device)
- # load pretrained model
- load_path = self.opt['path'].get('pretrain_network_g', None)
- if load_path is not None:
- self.load_network(self.net_g_ema, load_path, self.opt['path'].get('strict_load_g', True), 'params_ema')
- else:
- self.model_ema(0) # copy net_g weight
-
- self.net_g.train()
- self.net_d.train()
- self.net_g_ema.eval()
-
- # ----------- facial component networks ----------- #
- if ('network_d_left_eye' in self.opt and 'network_d_right_eye' in self.opt and 'network_d_mouth' in self.opt):
- self.use_facial_disc = True
- else:
- self.use_facial_disc = False
-
- if self.use_facial_disc:
- # left eye
- self.net_d_left_eye = build_network(self.opt['network_d_left_eye'])
- self.net_d_left_eye = self.model_to_device(self.net_d_left_eye)
- self.print_network(self.net_d_left_eye)
- load_path = self.opt['path'].get('pretrain_network_d_left_eye')
- if load_path is not None:
- self.load_network(self.net_d_left_eye, load_path, True, 'params')
- # right eye
- self.net_d_right_eye = build_network(self.opt['network_d_right_eye'])
- self.net_d_right_eye = self.model_to_device(self.net_d_right_eye)
- self.print_network(self.net_d_right_eye)
- load_path = self.opt['path'].get('pretrain_network_d_right_eye')
- if load_path is not None:
- self.load_network(self.net_d_right_eye, load_path, True, 'params')
- # mouth
- self.net_d_mouth = build_network(self.opt['network_d_mouth'])
- self.net_d_mouth = self.model_to_device(self.net_d_mouth)
- self.print_network(self.net_d_mouth)
- load_path = self.opt['path'].get('pretrain_network_d_mouth')
- if load_path is not None:
- self.load_network(self.net_d_mouth, load_path, True, 'params')
-
- self.net_d_left_eye.train()
- self.net_d_right_eye.train()
- self.net_d_mouth.train()
-
- # ----------- define facial component gan loss ----------- #
- self.cri_component = build_loss(train_opt['gan_component_opt']).to(self.device)
-
- # ----------- define losses ----------- #
- # pixel loss
- if train_opt.get('pixel_opt'):
- self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
- else:
- self.cri_pix = None
-
- # perceptual loss
- if train_opt.get('perceptual_opt'):
- self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
- else:
- self.cri_perceptual = None
-
- # L1 loss is used in pyramid loss, component style loss and identity loss
- self.cri_l1 = build_loss(train_opt['L1_opt']).to(self.device)
-
- # gan loss (wgan)
- self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device)
-
- # ----------- define identity loss ----------- #
- if 'network_identity' in self.opt:
- self.use_identity = True
- else:
- self.use_identity = False
-
- if self.use_identity:
- # define identity network
- self.network_identity = build_network(self.opt['network_identity'])
- self.network_identity = self.model_to_device(self.network_identity)
- self.print_network(self.network_identity)
- load_path = self.opt['path'].get('pretrain_network_identity')
- if load_path is not None:
- self.load_network(self.network_identity, load_path, True, None)
- self.network_identity.eval()
- for param in self.network_identity.parameters():
- param.requires_grad = False
-
- # regularization weights
- self.r1_reg_weight = train_opt['r1_reg_weight'] # for discriminator
- self.net_d_iters = train_opt.get('net_d_iters', 1)
- self.net_d_init_iters = train_opt.get('net_d_init_iters', 0)
- self.net_d_reg_every = train_opt['net_d_reg_every']
-
- # set up optimizers and schedulers
- self.setup_optimizers()
- self.setup_schedulers()
-
- def setup_optimizers(self):
- train_opt = self.opt['train']
-
- # ----------- optimizer g ----------- #
- net_g_reg_ratio = 1
- normal_params = []
- for _, param in self.net_g.named_parameters():
- normal_params.append(param)
- optim_params_g = [{ # add normal params first
- 'params': normal_params,
- 'lr': train_opt['optim_g']['lr']
- }]
- optim_type = train_opt['optim_g'].pop('type')
- lr = train_opt['optim_g']['lr'] * net_g_reg_ratio
- betas = (0**net_g_reg_ratio, 0.99**net_g_reg_ratio)
- self.optimizer_g = self.get_optimizer(optim_type, optim_params_g, lr, betas=betas)
- self.optimizers.append(self.optimizer_g)
-
- # ----------- optimizer d ----------- #
- net_d_reg_ratio = self.net_d_reg_every / (self.net_d_reg_every + 1)
- normal_params = []
- for _, param in self.net_d.named_parameters():
- normal_params.append(param)
- optim_params_d = [{ # add normal params first
- 'params': normal_params,
- 'lr': train_opt['optim_d']['lr']
- }]
- optim_type = train_opt['optim_d'].pop('type')
- lr = train_opt['optim_d']['lr'] * net_d_reg_ratio
- betas = (0**net_d_reg_ratio, 0.99**net_d_reg_ratio)
- self.optimizer_d = self.get_optimizer(optim_type, optim_params_d, lr, betas=betas)
- self.optimizers.append(self.optimizer_d)
-
- # ----------- optimizers for facial component networks ----------- #
- if self.use_facial_disc:
- # setup optimizers for facial component discriminators
- optim_type = train_opt['optim_component'].pop('type')
- lr = train_opt['optim_component']['lr']
- # left eye
- self.optimizer_d_left_eye = self.get_optimizer(
- optim_type, self.net_d_left_eye.parameters(), lr, betas=(0.9, 0.99))
- self.optimizers.append(self.optimizer_d_left_eye)
- # right eye
- self.optimizer_d_right_eye = self.get_optimizer(
- optim_type, self.net_d_right_eye.parameters(), lr, betas=(0.9, 0.99))
- self.optimizers.append(self.optimizer_d_right_eye)
- # mouth
- self.optimizer_d_mouth = self.get_optimizer(
- optim_type, self.net_d_mouth.parameters(), lr, betas=(0.9, 0.99))
- self.optimizers.append(self.optimizer_d_mouth)
-
- def feed_data(self, data):
- self.lq = data['lq'].to(self.device)
- if 'gt' in data:
- self.gt = data['gt'].to(self.device)
-
- if 'loc_left_eye' in data:
- # get facial component locations, shape (batch, 4)
- self.loc_left_eyes = data['loc_left_eye']
- self.loc_right_eyes = data['loc_right_eye']
- self.loc_mouths = data['loc_mouth']
-
- # uncomment to check data
- # import torchvision
- # if self.opt['rank'] == 0:
- # import os
- # os.makedirs('tmp/gt', exist_ok=True)
- # os.makedirs('tmp/lq', exist_ok=True)
- # print(self.idx)
- # torchvision.utils.save_image(
- # self.gt, f'tmp/gt/gt_{self.idx}.png', nrow=4, padding=2, normalize=True, range=(-1, 1))
- # torchvision.utils.save_image(
- # self.lq, f'tmp/lq/lq{self.idx}.png', nrow=4, padding=2, normalize=True, range=(-1, 1))
- # self.idx = self.idx + 1
-
- def construct_img_pyramid(self):
- """Construct image pyramid for intermediate restoration loss"""
- pyramid_gt = [self.gt]
- down_img = self.gt
- for _ in range(0, self.log_size - 3):
- down_img = F.interpolate(down_img, scale_factor=0.5, mode='bilinear', align_corners=False)
- pyramid_gt.insert(0, down_img)
- return pyramid_gt
-
- def get_roi_regions(self, eye_out_size=80, mouth_out_size=120):
- face_ratio = int(self.opt['network_g']['out_size'] / 512)
- eye_out_size *= face_ratio
- mouth_out_size *= face_ratio
-
- rois_eyes = []
- rois_mouths = []
- for b in range(self.loc_left_eyes.size(0)): # loop for batch size
- # left eye and right eye
- img_inds = self.loc_left_eyes.new_full((2, 1), b)
- bbox = torch.stack([self.loc_left_eyes[b, :], self.loc_right_eyes[b, :]], dim=0) # shape: (2, 4)
- rois = torch.cat([img_inds, bbox], dim=-1) # shape: (2, 5)
- rois_eyes.append(rois)
-            # mouth
- img_inds = self.loc_left_eyes.new_full((1, 1), b)
- rois = torch.cat([img_inds, self.loc_mouths[b:b + 1, :]], dim=-1) # shape: (1, 5)
- rois_mouths.append(rois)
-
- rois_eyes = torch.cat(rois_eyes, 0).to(self.device)
- rois_mouths = torch.cat(rois_mouths, 0).to(self.device)
-
- # real images
- all_eyes = roi_align(self.gt, boxes=rois_eyes, output_size=eye_out_size) * face_ratio
- self.left_eyes_gt = all_eyes[0::2, :, :, :]
- self.right_eyes_gt = all_eyes[1::2, :, :, :]
- self.mouths_gt = roi_align(self.gt, boxes=rois_mouths, output_size=mouth_out_size) * face_ratio
- # output
- all_eyes = roi_align(self.output, boxes=rois_eyes, output_size=eye_out_size) * face_ratio
- self.left_eyes = all_eyes[0::2, :, :, :]
- self.right_eyes = all_eyes[1::2, :, :, :]
- self.mouths = roi_align(self.output, boxes=rois_mouths, output_size=mouth_out_size) * face_ratio
-
- def _gram_mat(self, x):
- """Calculate Gram matrix.
-
- Args:
- x (torch.Tensor): Tensor with shape of (n, c, h, w).
-
- Returns:
- torch.Tensor: Gram matrix.
- """
- n, c, h, w = x.size()
- features = x.view(n, c, w * h)
- features_t = features.transpose(1, 2)
- gram = features.bmm(features_t) / (c * h * w)
- return gram
-
- def gray_resize_for_identity(self, out, size=128):
- out_gray = (0.2989 * out[:, 0, :, :] + 0.5870 * out[:, 1, :, :] + 0.1140 * out[:, 2, :, :])
- out_gray = out_gray.unsqueeze(1)
- out_gray = F.interpolate(out_gray, (size, size), mode='bilinear', align_corners=False)
- return out_gray
-
- def optimize_parameters(self, current_iter):
- # optimize net_g
- for p in self.net_d.parameters():
- p.requires_grad = False
- self.optimizer_g.zero_grad()
-
- # do not update facial component net_d
- if self.use_facial_disc:
- for p in self.net_d_left_eye.parameters():
- p.requires_grad = False
- for p in self.net_d_right_eye.parameters():
- p.requires_grad = False
- for p in self.net_d_mouth.parameters():
- p.requires_grad = False
-
- # image pyramid loss weight
- if current_iter < self.opt['train'].get('remove_pyramid_loss', float('inf')):
- pyramid_loss_weight = self.opt['train'].get('pyramid_loss_weight', 1)
- else:
- pyramid_loss_weight = 1e-12 # very small loss
- if pyramid_loss_weight > 0:
- self.output, out_rgbs = self.net_g(self.lq, return_rgb=True)
- pyramid_gt = self.construct_img_pyramid()
- else:
- self.output, out_rgbs = self.net_g(self.lq, return_rgb=False)
-
- # get roi-align regions
- if self.use_facial_disc:
- self.get_roi_regions(eye_out_size=80, mouth_out_size=120)
-
- l_g_total = 0
- loss_dict = OrderedDict()
- if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
- # pixel loss
- if self.cri_pix:
- l_g_pix = self.cri_pix(self.output, self.gt)
- l_g_total += l_g_pix
- loss_dict['l_g_pix'] = l_g_pix
-
- # image pyramid loss
- if pyramid_loss_weight > 0:
- for i in range(0, self.log_size - 2):
- l_pyramid = self.cri_l1(out_rgbs[i], pyramid_gt[i]) * pyramid_loss_weight
- l_g_total += l_pyramid
- loss_dict[f'l_p_{2**(i+3)}'] = l_pyramid
-
- # perceptual loss
- if self.cri_perceptual:
- l_g_percep, l_g_style = self.cri_perceptual(self.output, self.gt)
- if l_g_percep is not None:
- l_g_total += l_g_percep
- loss_dict['l_g_percep'] = l_g_percep
- if l_g_style is not None:
- l_g_total += l_g_style
- loss_dict['l_g_style'] = l_g_style
-
- # gan loss
- fake_g_pred = self.net_d(self.output)
- l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
- l_g_total += l_g_gan
- loss_dict['l_g_gan'] = l_g_gan
-
- # facial component loss
- if self.use_facial_disc:
- # left eye
- fake_left_eye, fake_left_eye_feats = self.net_d_left_eye(self.left_eyes, return_feats=True)
- l_g_gan = self.cri_component(fake_left_eye, True, is_disc=False)
- l_g_total += l_g_gan
- loss_dict['l_g_gan_left_eye'] = l_g_gan
- # right eye
- fake_right_eye, fake_right_eye_feats = self.net_d_right_eye(self.right_eyes, return_feats=True)
- l_g_gan = self.cri_component(fake_right_eye, True, is_disc=False)
- l_g_total += l_g_gan
- loss_dict['l_g_gan_right_eye'] = l_g_gan
- # mouth
- fake_mouth, fake_mouth_feats = self.net_d_mouth(self.mouths, return_feats=True)
- l_g_gan = self.cri_component(fake_mouth, True, is_disc=False)
- l_g_total += l_g_gan
- loss_dict['l_g_gan_mouth'] = l_g_gan
-
- if self.opt['train'].get('comp_style_weight', 0) > 0:
- # get gt feat
- _, real_left_eye_feats = self.net_d_left_eye(self.left_eyes_gt, return_feats=True)
- _, real_right_eye_feats = self.net_d_right_eye(self.right_eyes_gt, return_feats=True)
- _, real_mouth_feats = self.net_d_mouth(self.mouths_gt, return_feats=True)
-
- def _comp_style(feat, feat_gt, criterion):
- return criterion(self._gram_mat(feat[0]), self._gram_mat(
- feat_gt[0].detach())) * 0.5 + criterion(
- self._gram_mat(feat[1]), self._gram_mat(feat_gt[1].detach()))
-
- # facial component style loss
- comp_style_loss = 0
- comp_style_loss += _comp_style(fake_left_eye_feats, real_left_eye_feats, self.cri_l1)
- comp_style_loss += _comp_style(fake_right_eye_feats, real_right_eye_feats, self.cri_l1)
- comp_style_loss += _comp_style(fake_mouth_feats, real_mouth_feats, self.cri_l1)
- comp_style_loss = comp_style_loss * self.opt['train']['comp_style_weight']
- l_g_total += comp_style_loss
- loss_dict['l_g_comp_style_loss'] = comp_style_loss
-
- # identity loss
- if self.use_identity:
- identity_weight = self.opt['train']['identity_weight']
- # get gray images and resize
- out_gray = self.gray_resize_for_identity(self.output)
- gt_gray = self.gray_resize_for_identity(self.gt)
-
- identity_gt = self.network_identity(gt_gray).detach()
- identity_out = self.network_identity(out_gray)
- l_identity = self.cri_l1(identity_out, identity_gt) * identity_weight
- l_g_total += l_identity
- loss_dict['l_identity'] = l_identity
-
- l_g_total.backward()
- self.optimizer_g.step()
-
-        # EMA: update net_g_ema with an exponential moving average of net_g's weights
- self.model_ema(decay=0.5**(32 / (10 * 1000)))
-
- # ----------- optimize net_d ----------- #
- for p in self.net_d.parameters():
- p.requires_grad = True
- self.optimizer_d.zero_grad()
- if self.use_facial_disc:
- for p in self.net_d_left_eye.parameters():
- p.requires_grad = True
- for p in self.net_d_right_eye.parameters():
- p.requires_grad = True
- for p in self.net_d_mouth.parameters():
- p.requires_grad = True
- self.optimizer_d_left_eye.zero_grad()
- self.optimizer_d_right_eye.zero_grad()
- self.optimizer_d_mouth.zero_grad()
-
- fake_d_pred = self.net_d(self.output.detach())
- real_d_pred = self.net_d(self.gt)
- l_d = self.cri_gan(real_d_pred, True, is_disc=True) + self.cri_gan(fake_d_pred, False, is_disc=True)
- loss_dict['l_d'] = l_d
- # In WGAN, real_score should be positive and fake_score should be negative
- loss_dict['real_score'] = real_d_pred.detach().mean()
- loss_dict['fake_score'] = fake_d_pred.detach().mean()
- l_d.backward()
-
- # regularization loss
- if current_iter % self.net_d_reg_every == 0:
- self.gt.requires_grad = True
- real_pred = self.net_d(self.gt)
- l_d_r1 = r1_penalty(real_pred, self.gt)
- l_d_r1 = (self.r1_reg_weight / 2 * l_d_r1 * self.net_d_reg_every + 0 * real_pred[0])
- loss_dict['l_d_r1'] = l_d_r1.detach().mean()
- l_d_r1.backward()
-
- self.optimizer_d.step()
-
- # optimize facial component discriminators
- if self.use_facial_disc:
- # left eye
- fake_d_pred, _ = self.net_d_left_eye(self.left_eyes.detach())
- real_d_pred, _ = self.net_d_left_eye(self.left_eyes_gt)
- l_d_left_eye = self.cri_component(
- real_d_pred, True, is_disc=True) + self.cri_gan(
- fake_d_pred, False, is_disc=True)
- loss_dict['l_d_left_eye'] = l_d_left_eye
- l_d_left_eye.backward()
- # right eye
- fake_d_pred, _ = self.net_d_right_eye(self.right_eyes.detach())
- real_d_pred, _ = self.net_d_right_eye(self.right_eyes_gt)
- l_d_right_eye = self.cri_component(
- real_d_pred, True, is_disc=True) + self.cri_gan(
- fake_d_pred, False, is_disc=True)
- loss_dict['l_d_right_eye'] = l_d_right_eye
- l_d_right_eye.backward()
- # mouth
- fake_d_pred, _ = self.net_d_mouth(self.mouths.detach())
- real_d_pred, _ = self.net_d_mouth(self.mouths_gt)
- l_d_mouth = self.cri_component(
- real_d_pred, True, is_disc=True) + self.cri_gan(
- fake_d_pred, False, is_disc=True)
- loss_dict['l_d_mouth'] = l_d_mouth
- l_d_mouth.backward()
-
- self.optimizer_d_left_eye.step()
- self.optimizer_d_right_eye.step()
- self.optimizer_d_mouth.step()
-
- self.log_dict = self.reduce_loss_dict(loss_dict)
-
- def test(self):
- with torch.no_grad():
- if hasattr(self, 'net_g_ema'):
- self.net_g_ema.eval()
- self.output, _ = self.net_g_ema(self.lq)
- else:
- logger = get_root_logger()
- logger.warning('Do not have self.net_g_ema, use self.net_g.')
- self.net_g.eval()
- self.output, _ = self.net_g(self.lq)
- self.net_g.train()
-
- def dist_validation(self, dataloader, current_iter, tb_logger, save_img):
- if self.opt['rank'] == 0:
- self.nondist_validation(dataloader, current_iter, tb_logger, save_img)
-
- def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
- dataset_name = dataloader.dataset.opt['name']
- with_metrics = self.opt['val'].get('metrics') is not None
- use_pbar = self.opt['val'].get('pbar', False)
-
- if with_metrics:
- if not hasattr(self, 'metric_results'): # only execute in the first run
- self.metric_results = {metric: 0 for metric in self.opt['val']['metrics'].keys()}
- # initialize the best metric results for each dataset_name (supporting multiple validation datasets)
- self._initialize_best_metric_results(dataset_name)
- # zero self.metric_results
- self.metric_results = {metric: 0 for metric in self.metric_results}
-
- metric_data = dict()
- if use_pbar:
- pbar = tqdm(total=len(dataloader), unit='image')
-
- for idx, val_data in enumerate(dataloader):
- img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
- self.feed_data(val_data)
- self.test()
-
- sr_img = tensor2img(self.output.detach().cpu(), min_max=(-1, 1))
- metric_data['img'] = sr_img
- if hasattr(self, 'gt'):
- gt_img = tensor2img(self.gt.detach().cpu(), min_max=(-1, 1))
- metric_data['img2'] = gt_img
- del self.gt
-
- # tentatively free tensors to avoid running out of GPU memory
- del self.lq
- del self.output
- torch.cuda.empty_cache()
-
- if save_img:
- if self.opt['is_train']:
- save_img_path = osp.join(self.opt['path']['visualization'], img_name,
- f'{img_name}_{current_iter}.png')
- else:
- if self.opt['val']['suffix']:
- save_img_path = osp.join(self.opt['path']['visualization'], dataset_name,
- f'{img_name}_{self.opt["val"]["suffix"]}.png')
- else:
- save_img_path = osp.join(self.opt['path']['visualization'], dataset_name,
- f'{img_name}_{self.opt["name"]}.png')
- imwrite(sr_img, save_img_path)
-
- if with_metrics:
- # calculate metrics
- for name, opt_ in self.opt['val']['metrics'].items():
- self.metric_results[name] += calculate_metric(metric_data, opt_)
- if use_pbar:
- pbar.update(1)
- pbar.set_description(f'Test {img_name}')
- if use_pbar:
- pbar.close()
-
- if with_metrics:
- for metric in self.metric_results.keys():
- self.metric_results[metric] /= (idx + 1)
- # update the best metric result
- self._update_best_metric_result(dataset_name, metric, self.metric_results[metric], current_iter)
-
- self._log_validation_metric_values(current_iter, dataset_name, tb_logger)
-
- def _log_validation_metric_values(self, current_iter, dataset_name, tb_logger):
- log_str = f'Validation {dataset_name}\n'
- for metric, value in self.metric_results.items():
- log_str += f'\t # {metric}: {value:.4f}'
- if hasattr(self, 'best_metric_results'):
- log_str += (f'\tBest: {self.best_metric_results[dataset_name][metric]["val"]:.4f} @ '
- f'{self.best_metric_results[dataset_name][metric]["iter"]} iter')
- log_str += '\n'
-
- logger = get_root_logger()
- logger.info(log_str)
- if tb_logger:
- for metric, value in self.metric_results.items():
- tb_logger.add_scalar(f'metrics/{dataset_name}/{metric}', value, current_iter)
-
- def save(self, epoch, current_iter):
- # save net_g and net_d
- self.save_network([self.net_g, self.net_g_ema], 'net_g', current_iter, param_key=['params', 'params_ema'])
- self.save_network(self.net_d, 'net_d', current_iter)
- # save component discriminators
- if self.use_facial_disc:
- self.save_network(self.net_d_left_eye, 'net_d_left_eye', current_iter)
- self.save_network(self.net_d_right_eye, 'net_d_right_eye', current_iter)
- self.save_network(self.net_d_mouth, 'net_d_mouth', current_iter)
- # save training state
- self.save_training_state(epoch, current_iter)
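The R1 regularization step above relies on an `r1_penalty` helper. As a point of reference, here is a minimal sketch of the quantity such a helper typically computes (the StyleGAN2-style gradient penalty on real images); the actual implementation lives in basicsr and may differ in detail. Note that `self.gt.requires_grad = True` in the training code exists precisely so this gradient can be taken with respect to the real batch.

```python
import torch
from torch import autograd

def r1_penalty_sketch(real_pred, real_img):
    # R1 regularization: penalize the squared gradient norm of the
    # discriminator output w.r.t. *real* images (Mescheder et al., 2018).
    grad_real = autograd.grad(outputs=real_pred.sum(), inputs=real_img, create_graph=True)[0]
    return grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()
```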
diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/stretch/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/stretch/__init__.py
deleted file mode 100644
index d8c5fd26bbb635ad0e854572e25fc17723cf1af0..0000000000000000000000000000000000000000
--- a/spaces/CikeyQI/meme-api/meme_generator/memes/stretch/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from pathlib import Path
-from typing import List
-
-from meme_generator import add_meme
-from pil_utils import BuildImage
-
-img_dir = Path(__file__).parent / "images"
-
-
-def stretch(images: List[BuildImage], texts, args):
- frame = BuildImage.open(img_dir / "0.png")
- frame.paste(
- images[0].convert("RGBA").resize((500, 500), keep_ratio=True), below=True
- )
- return frame.save_jpg()
-
-
-add_meme("stretch", stretch, min_images=1, max_images=1, keywords=["双手", "伸展"])
diff --git a/spaces/Codecooker/rvcapi/src/infer_pack/models.py b/spaces/Codecooker/rvcapi/src/infer_pack/models.py
deleted file mode 100644
index 5e4b2e72383efaee1fae4f5c42e3db2c627e4190..0000000000000000000000000000000000000000
--- a/spaces/Codecooker/rvcapi/src/infer_pack/models.py
+++ /dev/null
@@ -1,1124 +0,0 @@
-import math, pdb, os
-from time import time as ttime
-import torch
-from torch import nn
-from torch.nn import functional as F
-from infer_pack import modules
-from infer_pack import attentions
-from infer_pack import commons
-from infer_pack.commons import init_weights, get_padding
-from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
-from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
-from infer_pack.commons import init_weights
-import numpy as np
-from infer_pack import commons
-
-
-class TextEncoder256(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(256, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class TextEncoder768(nn.Module):
- def __init__(
- self,
- out_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=True,
- ):
- super().__init__()
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.emb_phone = nn.Linear(768, hidden_channels)
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
- if f0:
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
- self.encoder = attentions.Encoder(
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, phone, pitch, lengths):
- if pitch is None:
- x = self.emb_phone(phone)
- else:
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
- x = self.lrelu(x)
- x = torch.transpose(x, 1, -1) # [b, h, t]
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.encoder(x * x_mask, x_mask)
- stats = self.proj(x) * x_mask
-
- m, logs = torch.split(stats, self.out_channels, dim=1)
- return m, logs, x_mask
-
-
-class ResidualCouplingBlock(nn.Module):
- def __init__(
- self,
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- n_flows=4,
- gin_channels=0,
- ):
- super().__init__()
- self.channels = channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.n_flows = n_flows
- self.gin_channels = gin_channels
-
- self.flows = nn.ModuleList()
- for i in range(n_flows):
- self.flows.append(
- modules.ResidualCouplingLayer(
- channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- mean_only=True,
- )
- )
- self.flows.append(modules.Flip())
-
- def forward(self, x, x_mask, g=None, reverse=False):
- if not reverse:
- for flow in self.flows:
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
- else:
- for flow in reversed(self.flows):
- x = flow(x, x_mask, g=g, reverse=reverse)
- return x
-
- def remove_weight_norm(self):
- for i in range(self.n_flows):
- self.flows[i * 2].remove_weight_norm()
-
-
-class PosteriorEncoder(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=0,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.hidden_channels = hidden_channels
- self.kernel_size = kernel_size
- self.dilation_rate = dilation_rate
- self.n_layers = n_layers
- self.gin_channels = gin_channels
-
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
- self.enc = modules.WN(
- hidden_channels,
- kernel_size,
- dilation_rate,
- n_layers,
- gin_channels=gin_channels,
- )
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
-
- def forward(self, x, x_lengths, g=None):
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
- x.dtype
- )
- x = self.pre(x) * x_mask
- x = self.enc(x, x_mask, g=g)
- stats = self.proj(x) * x_mask
- m, logs = torch.split(stats, self.out_channels, dim=1)
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
- return z, m, logs, x_mask
-
- def remove_weight_norm(self):
- self.enc.remove_weight_norm()
-
-
-class Generator(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=0,
- ):
- super(Generator, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- def forward(self, x, g=None):
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
-
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-class SineGen(torch.nn.Module):
- """Definition of sine generator
- SineGen(samp_rate, harmonic_num = 0,
- sine_amp = 0.1, noise_std = 0.003,
- voiced_threshold = 0,
- flag_for_pulse=False)
- samp_rate: sampling rate in Hz
- harmonic_num: number of harmonic overtones (default 0)
- sine_amp: amplitude of sine-wavefrom (default 0.1)
- noise_std: std of Gaussian noise (default 0.003)
- voiced_thoreshold: F0 threshold for U/V classification (default 0)
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
- Note: when flag_for_pulse is True, the first time step of a voiced
- segment is always sin(np.pi) or cos(0)
- """
-
- def __init__(
- self,
- samp_rate,
- harmonic_num=0,
- sine_amp=0.1,
- noise_std=0.003,
- voiced_threshold=0,
- flag_for_pulse=False,
- ):
- super(SineGen, self).__init__()
- self.sine_amp = sine_amp
- self.noise_std = noise_std
- self.harmonic_num = harmonic_num
- self.dim = self.harmonic_num + 1
- self.sampling_rate = samp_rate
- self.voiced_threshold = voiced_threshold
-
- def _f02uv(self, f0):
- # generate uv signal
- uv = torch.ones_like(f0)
- uv = uv * (f0 > self.voiced_threshold)
- return uv
-
- def forward(self, f0, upp):
- """sine_tensor, uv = forward(f0)
- input F0: tensor(batchsize=1, length, dim=1)
- f0 for unvoiced steps should be 0
- output sine_tensor: tensor(batchsize=1, length, dim)
- output uv: tensor(batchsize=1, length, 1)
- """
- with torch.no_grad():
- f0 = f0[:, None].transpose(1, 2)
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
- # fundamental component
- f0_buf[:, :, 0] = f0[:, :, 0]
- for idx in np.arange(self.harmonic_num):
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
- idx + 2
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
- rad_values = (f0_buf / self.sampling_rate) % 1  # taking % 1 here means the per-harmonic products cannot be folded into later post-processing
- rand_ini = torch.rand(
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
- )
- rand_ini[:, 0] = 0
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  # applying % 1 here would keep the later cumsum from being optimized
- tmp_over_one *= upp
- tmp_over_one = F.interpolate(
- tmp_over_one.transpose(2, 1),
- scale_factor=upp,
- mode="linear",
- align_corners=True,
- ).transpose(2, 1)
- rad_values = F.interpolate(
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- tmp_over_one %= 1
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
- cumsum_shift = torch.zeros_like(rad_values)
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
- sine_waves = torch.sin(
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
- )
- sine_waves = sine_waves * self.sine_amp
- uv = self._f02uv(f0)
- uv = F.interpolate(
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
- ).transpose(2, 1)
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
- noise = noise_amp * torch.randn_like(sine_waves)
- sine_waves = sine_waves * uv + noise
- return sine_waves, uv, noise
-
-
-class SourceModuleHnNSF(torch.nn.Module):
- """SourceModule for hn-nsf
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
- add_noise_std=0.003, voiced_threshod=0)
- sampling_rate: sampling_rate in Hz
- harmonic_num: number of harmonic above F0 (default: 0)
- sine_amp: amplitude of sine source signal (default: 0.1)
- add_noise_std: std of additive Gaussian noise (default: 0.003)
- note that amplitude of noise in unvoiced is decided
- by sine_amp
- voiced_threshold: threhold to set U/V given F0 (default: 0)
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
- F0_sampled (batchsize, length, 1)
- Sine_source (batchsize, length, 1)
- noise_source (batchsize, length 1)
- uv (batchsize, length, 1)
- """
-
- def __init__(
- self,
- sampling_rate,
- harmonic_num=0,
- sine_amp=0.1,
- add_noise_std=0.003,
- voiced_threshold=0,
- is_half=True,
- ):
- super(SourceModuleHnNSF, self).__init__()
-
- self.sine_amp = sine_amp
- self.noise_std = add_noise_std
- self.is_half = is_half
- # to produce sine waveforms
- self.l_sin_gen = SineGen(
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshold
- )
-
- # to merge source harmonics into a single excitation
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
- self.l_tanh = torch.nn.Tanh()
-
- def forward(self, x, upp=None):
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
- if self.is_half:
- sine_wavs = sine_wavs.half()
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
- return sine_merge, None, None # noise, uv
-
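A shape sketch for the harmonic source path above may help; the sample rate and upsampling factor below are illustrative, not taken from the repo's configs:

```python
import torch

# Assumed values for illustration: an F0 contour of 100 frames, upsampled
# by upp=480 samples per frame at 48 kHz.
sr, upp = 48000, 480
m_source = SourceModuleHnNSF(sampling_rate=sr, harmonic_num=0, is_half=False)
f0 = torch.full((1, 100), 220.0)       # (batch, frames), 220 Hz everywhere
har_source, _, _ = m_source(f0, upp)   # merged excitation in [-1, 1]
print(har_source.shape)                # torch.Size([1, 48000, 1]) = frames * upp
```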
-
-class GeneratorNSF(torch.nn.Module):
- def __init__(
- self,
- initial_channel,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels,
- sr,
- is_half=False,
- ):
- super(GeneratorNSF, self).__init__()
- self.num_kernels = len(resblock_kernel_sizes)
- self.num_upsamples = len(upsample_rates)
-
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
- self.m_source = SourceModuleHnNSF(
- sampling_rate=sr, harmonic_num=0, is_half=is_half
- )
- self.noise_convs = nn.ModuleList()
- self.conv_pre = Conv1d(
- initial_channel, upsample_initial_channel, 7, 1, padding=3
- )
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
-
- self.ups = nn.ModuleList()
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
- c_cur = upsample_initial_channel // (2 ** (i + 1))
- self.ups.append(
- weight_norm(
- ConvTranspose1d(
- upsample_initial_channel // (2**i),
- upsample_initial_channel // (2 ** (i + 1)),
- k,
- u,
- padding=(k - u) // 2,
- )
- )
- )
- if i + 1 < len(upsample_rates):
- stride_f0 = np.prod(upsample_rates[i + 1 :])
- self.noise_convs.append(
- Conv1d(
- 1,
- c_cur,
- kernel_size=stride_f0 * 2,
- stride=stride_f0,
- padding=stride_f0 // 2,
- )
- )
- else:
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
-
- self.resblocks = nn.ModuleList()
- for i in range(len(self.ups)):
- ch = upsample_initial_channel // (2 ** (i + 1))
- for j, (k, d) in enumerate(
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
- ):
- self.resblocks.append(resblock(ch, k, d))
-
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
- self.ups.apply(init_weights)
-
- if gin_channels != 0:
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
-
- self.upp = np.prod(upsample_rates)
-
- def forward(self, x, f0, g=None):
- har_source, noi_source, uv = self.m_source(f0, self.upp)
- har_source = har_source.transpose(1, 2)
- x = self.conv_pre(x)
- if g is not None:
- x = x + self.cond(g)
-
- for i in range(self.num_upsamples):
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- x = self.ups[i](x)
- x_source = self.noise_convs[i](har_source)
- x = x + x_source
- xs = None
- for j in range(self.num_kernels):
- if xs is None:
- xs = self.resblocks[i * self.num_kernels + j](x)
- else:
- xs += self.resblocks[i * self.num_kernels + j](x)
- x = xs / self.num_kernels
- x = F.leaky_relu(x)
- x = self.conv_post(x)
- x = torch.tanh(x)
- return x
-
- def remove_weight_norm(self):
- for l in self.ups:
- remove_weight_norm(l)
- for l in self.resblocks:
- l.remove_weight_norm()
-
-
-sr2sr = {
- "32k": 32000,
- "40k": 40000,
- "48k": 48000,
-}
-
-
-class SynthesizerTrnMs256NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
- ):  # ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
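A hypothetical inference sketch for the synthesizer above; every hyperparameter value here is illustrative (loosely modeled on common RVC 48k settings), not read from the original configs:

```python
import torch

net_g = SynthesizerTrnMs256NSFsid(
    spec_channels=1025, segment_size=32, inter_channels=192,
    hidden_channels=192, filter_channels=768, n_heads=2, n_layers=6,
    kernel_size=3, p_dropout=0, resblock="1",
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5]] * 3,
    upsample_rates=[10, 6, 2, 2, 2], upsample_initial_channel=512,
    upsample_kernel_sizes=[16, 16, 4, 4, 4],
    spk_embed_dim=109, gin_channels=256, sr="48k", is_half=False,
)
net_g.eval()
t = 100
phone = torch.randn(1, t, 256)             # 256-dim content features
pitch = torch.randint(1, 255, (1, t))      # coarse pitch ids for emb_pitch
nsff0 = torch.full((1, t), 220.0)          # F0 in Hz for the NSF source
sid = torch.LongTensor([0])                # speaker id
with torch.no_grad():
    audio, _, _ = net_g.infer(phone, torch.LongTensor([t]), pitch, nsff0, sid)
# audio: (1, 1, t * prod(upsample_rates)) waveform in [-1, 1]
```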
-
-class SynthesizerTrnMs768NSFsid(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr,
- **kwargs
- ):
- super().__init__()
- if type(sr) == type("strr"):
- sr = sr2sr[sr]
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- )
- self.dec = GeneratorNSF(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- sr=sr,
- is_half=kwargs["is_half"],
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(
- self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds
- ):  # ds is the speaker id, shape [bs, 1]
- # print(1,pitch.shape)#[bs,t]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)
- pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
- # print(-2,pitchf.shape,z_slice.shape)
- o = self.dec(z_slice, pitchf, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs256NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder256(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class SynthesizerTrnMs768NSFsid_nono(nn.Module):
- def __init__(
- self,
- spec_channels,
- segment_size,
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- spk_embed_dim,
- gin_channels,
- sr=None,
- **kwargs
- ):
- super().__init__()
- self.spec_channels = spec_channels
- self.inter_channels = inter_channels
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.resblock = resblock
- self.resblock_kernel_sizes = resblock_kernel_sizes
- self.resblock_dilation_sizes = resblock_dilation_sizes
- self.upsample_rates = upsample_rates
- self.upsample_initial_channel = upsample_initial_channel
- self.upsample_kernel_sizes = upsample_kernel_sizes
- self.segment_size = segment_size
- self.gin_channels = gin_channels
- # self.hop_length = hop_length#
- self.spk_embed_dim = spk_embed_dim
- self.enc_p = TextEncoder768(
- inter_channels,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size,
- p_dropout,
- f0=False,
- )
- self.dec = Generator(
- inter_channels,
- resblock,
- resblock_kernel_sizes,
- resblock_dilation_sizes,
- upsample_rates,
- upsample_initial_channel,
- upsample_kernel_sizes,
- gin_channels=gin_channels,
- )
- self.enc_q = PosteriorEncoder(
- spec_channels,
- inter_channels,
- hidden_channels,
- 5,
- 1,
- 16,
- gin_channels=gin_channels,
- )
- self.flow = ResidualCouplingBlock(
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
- )
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
-
- def remove_weight_norm(self):
- self.dec.remove_weight_norm()
- self.flow.remove_weight_norm()
- self.enc_q.remove_weight_norm()
-
- def forward(self, phone, phone_lengths, y, y_lengths, ds):  # ds is the speaker id, shape [bs, 1]
- g = self.emb_g(ds).unsqueeze(-1)  # [b, 256, 1]; the trailing 1 is the time axis, broadcast later
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)
- z_p = self.flow(z, y_mask, g=g)
- z_slice, ids_slice = commons.rand_slice_segments(
- z, y_lengths, self.segment_size
- )
- o = self.dec(z_slice, g=g)
- return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)
-
- def infer(self, phone, phone_lengths, sid, max_len=None):
- g = self.emb_g(sid).unsqueeze(-1)
- m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)
- z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask
- z = self.flow(z_p, x_mask, g=g, reverse=True)
- o = self.dec((z * x_mask)[:, :, :max_len], g=g)
- return o, x_mask, (z, z_p, m_p, logs_p)
-
-
-class MultiPeriodDiscriminator(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminator, self).__init__()
- periods = [2, 3, 5, 7, 11, 17]
- # periods = [3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class MultiPeriodDiscriminatorV2(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(MultiPeriodDiscriminatorV2, self).__init__()
- # periods = [2, 3, 5, 7, 11, 17]
- periods = [2, 3, 5, 7, 11, 17, 23, 37]
-
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
- discs = discs + [
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
- ]
- self.discriminators = nn.ModuleList(discs)
-
- def forward(self, y, y_hat):
- y_d_rs = []
- y_d_gs = []
- fmap_rs = []
- fmap_gs = []
- for i, d in enumerate(self.discriminators):
- y_d_r, fmap_r = d(y)
- y_d_g, fmap_g = d(y_hat)
- # for j in range(len(fmap_r)):
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
- y_d_rs.append(y_d_r)
- y_d_gs.append(y_d_g)
- fmap_rs.append(fmap_r)
- fmap_gs.append(fmap_g)
-
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
-
-
-class DiscriminatorS(torch.nn.Module):
- def __init__(self, use_spectral_norm=False):
- super(DiscriminatorS, self).__init__()
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
- ]
- )
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
-
- def forward(self, x):
- fmap = []
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
-
-
-class DiscriminatorP(torch.nn.Module):
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
- super(DiscriminatorP, self).__init__()
- self.period = period
- self.use_spectral_norm = use_spectral_norm
- norm_f = spectral_norm if use_spectral_norm else weight_norm
- self.convs = nn.ModuleList(
- [
- norm_f(
- Conv2d(
- 1,
- 32,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 32,
- 128,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 128,
- 512,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 512,
- 1024,
- (kernel_size, 1),
- (stride, 1),
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- norm_f(
- Conv2d(
- 1024,
- 1024,
- (kernel_size, 1),
- 1,
- padding=(get_padding(kernel_size, 1), 0),
- )
- ),
- ]
- )
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
-
- def forward(self, x):
- fmap = []
-
- # 1d to 2d
- b, c, t = x.shape
- if t % self.period != 0: # pad first
- n_pad = self.period - (t % self.period)
- x = F.pad(x, (0, n_pad), "reflect")
- t = t + n_pad
- x = x.view(b, c, t // self.period, self.period)
-
- for l in self.convs:
- x = l(x)
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
- fmap.append(x)
- x = self.conv_post(x)
- fmap.append(x)
- x = torch.flatten(x, 1, -1)
-
- return x, fmap
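The "1d to 2d" fold in `DiscriminatorP.forward` is the core trick of the multi-period discriminator. A standalone sketch of just that reshape, with illustrative sizes:

```python
import torch
import torch.nn.functional as F

# A waveform of length t is reflect-padded to a multiple of `period`, then
# viewed as a 2-D map so 2-D convolutions see samples that are `period`
# steps apart in one column.
x = torch.randn(1, 1, 1000)          # (batch, channels, samples)
period = 3
b, c, t = x.shape
if t % period != 0:
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(b, c, t // period, period)
print(x.shape)                       # torch.Size([1, 1, 334, 3]); 1000 padded to 1002
```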
diff --git a/spaces/CofAI/chat.b4/g4f/Provider/Providers/Bard.py b/spaces/CofAI/chat.b4/g4f/Provider/Providers/Bard.py
deleted file mode 100644
index 4c37c4b719430031fce41ce49946f0e6ac93d155..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat.b4/g4f/Provider/Providers/Bard.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import os, requests, json, browser_cookie3, re, random
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://bard.google.com'
-model = ['Palm2']
-supports_stream = False
-needs_auth = True
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
- psid = {cookie.name: cookie.value for cookie in browser_cookie3.chrome(
- domain_name='.google.com')}['__Secure-1PSID']
-
- formatted = '\n'.join([
- '%s: %s' % (message['role'], message['content']) for message in messages
- ])
- prompt = f'{formatted}\nAssistant:'
-
- proxy = kwargs.get('proxy', False)
- if not proxy:
- print('Warning: no proxy was given; Google Bard is blocked in many countries, so this may not work')
-
- snlm0e = None
- conversation_id = None
- response_id = None
- choice_id = None
-
- client = requests.Session()
- client.proxies = {
- 'http': f'http://{proxy}',
- 'https': f'http://{proxy}'} if proxy else None
-
- client.headers = {
- 'authority': 'bard.google.com',
- 'content-type': 'application/x-www-form-urlencoded;charset=UTF-8',
- 'origin': 'https://bard.google.com',
- 'referer': 'https://bard.google.com/',
- 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
- 'x-same-domain': '1',
- 'cookie': f'__Secure-1PSID={psid}'
- }
-
- snlm0e = re.search(r'SNlM0e\":\"(.*?)\"',
- client.get('https://bard.google.com/').text).group(1) if not snlm0e else snlm0e
-
- params = {
- 'bl': 'boq_assistant-bard-web-server_20230326.21_p0',
- '_reqid': random.randint(1111, 9999),
- 'rt': 'c'
- }
-
- data = {
- 'at': snlm0e,
- 'f.req': json.dumps([None, json.dumps([[prompt], None, [conversation_id, response_id, choice_id]])])}
-
- intents = '.'.join([
- 'assistant',
- 'lamda',
- 'BardFrontendService'
- ])
-
- response = client.post(f'https://bard.google.com/_/BardChatUi/data/{intents}/StreamGenerate',
- data=data, params=params)
-
- chat_data = json.loads(response.content.splitlines()[3])[0][2]
- if chat_data:
- json_chat_data = json.loads(chat_data)
-
- yield json_chat_data[0][0]
-
- else:
- yield 'error'
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
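A hypothetical call sketch for this provider; the proxy address is illustrative. The function is a generator, and it needs a logged-in Chrome profile so browser_cookie3 can find the `__Secure-1PSID` cookie:

```python
messages = [{"role": "user", "content": "Hello"}]
# proxy value below is a placeholder, not a working endpoint
for chunk in _create_completion(model="Palm2", messages=messages,
                                stream=False, proxy="127.0.0.1:8080"):
    print(chunk)
```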
diff --git a/spaces/CofAI/chat/client/css/stop-generating.css b/spaces/CofAI/chat/client/css/stop-generating.css
deleted file mode 100644
index 3c2010d25065fbef63b104df743ef72c00259871..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat/client/css/stop-generating.css
+++ /dev/null
@@ -1,38 +0,0 @@
-.stop-generating {
- position: absolute;
- bottom: 128px;
- left: 50%;
- transform: translateX(-50%);
- z-index: 1000000;
-}
-
-.stop-generating button {
- backdrop-filter: blur(20px);
- -webkit-backdrop-filter: blur(20px);
- background-color: var(--blur-bg);
- color: var(--colour-3);
- cursor: pointer;
- animation: show_popup 0.4s;
-}
-
-@keyframes show_popup {
- from {
- opacity: 0;
- transform: translateY(10px);
- }
-}
-
-@keyframes hide_popup {
- to {
- opacity: 0;
- transform: translateY(10px);
- }
-}
-
-.stop-generating-hiding button {
- animation: hide_popup 0.4s;
-}
-
-.stop-generating-hidden button {
- display: none;
-}
diff --git a/spaces/CofAI/chat/g4f/Provider/Providers/Gravityengine.py b/spaces/CofAI/chat/g4f/Provider/Providers/Gravityengine.py
deleted file mode 100644
index f0cd09daaaae0adaa349f91139dc60c7ac79c028..0000000000000000000000000000000000000000
--- a/spaces/CofAI/chat/g4f/Provider/Providers/Gravityengine.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import requests
-import os
-import json
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://gpt4.xunika.uk/'
-model = ['gpt-3.5-turbo-16k', 'gpt-3.5-turbo-0613']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
- headers = {
- 'Content-Type': 'application/json',
- }
- data = {
- 'model': model,
- 'temperature': temperature,  # use the accepted parameter instead of a hardcoded value
- 'presence_penalty': 0,
- 'messages': messages,
- }
- response = requests.post(url + 'api/openai/v1/chat/completions',
- headers=headers, json=data, stream=True)
-
- yield response.json()['choices'][0]['message']['content']
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
\ No newline at end of file
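A brief usage sketch, assuming the endpoint returns an OpenAI-style JSON body; note the provider yields the whole message at once even though `supports_stream` is True:

```python
messages = [{"role": "user", "content": "Hi"}]
# _create_completion is a generator; next() pulls the single yielded reply
reply = next(_create_completion("gpt-3.5-turbo-16k", messages, stream=False))
print(reply)
```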
diff --git a/spaces/Cyril666/my_abi/transforms.py b/spaces/Cyril666/my_abi/transforms.py
deleted file mode 100644
index 5a7042f3368bc832566d5c22d1e18abe5d8547f5..0000000000000000000000000000000000000000
--- a/spaces/Cyril666/my_abi/transforms.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import math
-import numbers
-import random
-
-import cv2
-import numpy as np
-from PIL import Image
-from torchvision import transforms
-from torchvision.transforms import Compose
-
-
-def sample_asym(magnitude, size=None):
- return np.random.beta(1, 4, size) * magnitude
-
-def sample_sym(magnitude, size=None):
- return (np.random.beta(4, 4, size=size) - 0.5) * 2 * magnitude
-
-def sample_uniform(low, high, size=None):
- return np.random.uniform(low, high, size=size)
-
-def get_interpolation(type='random'):
- if type == 'random':
- choice = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA]
- interpolation = choice[random.randint(0, len(choice)-1)]
- elif type == 'nearest': interpolation = cv2.INTER_NEAREST
- elif type == 'linear': interpolation = cv2.INTER_LINEAR
- elif type == 'cubic': interpolation = cv2.INTER_CUBIC
- elif type == 'area': interpolation = cv2.INTER_AREA
- else: raise TypeError('Only nearest, linear, cubic and area interpolation types are supported!')
- return interpolation
-
-class CVRandomRotation(object):
- def __init__(self, degrees=15):
- assert isinstance(degrees, numbers.Number), "degree should be a single number."
- assert degrees >= 0, "degree must be positive."
- self.degrees = degrees
-
- @staticmethod
- def get_params(degrees):
- return sample_sym(degrees)
-
- def __call__(self, img):
- angle = self.get_params(self.degrees)
- src_h, src_w = img.shape[:2]
- M = cv2.getRotationMatrix2D(center=(src_w/2, src_h/2), angle=angle, scale=1.0)
- abs_cos, abs_sin = abs(M[0,0]), abs(M[0,1])
- dst_w = int(src_h * abs_sin + src_w * abs_cos)
- dst_h = int(src_h * abs_cos + src_w * abs_sin)
- M[0, 2] += (dst_w - src_w)/2
- M[1, 2] += (dst_h - src_h)/2
-
- flags = get_interpolation()
- return cv2.warpAffine(img, M, (dst_w, dst_h), flags=flags, borderMode=cv2.BORDER_REPLICATE)
-
-class CVRandomAffine(object):
- def __init__(self, degrees, translate=None, scale=None, shear=None):
- assert isinstance(degrees, numbers.Number), "degree should be a single number."
- assert degrees >= 0, "degree must be positive."
- self.degrees = degrees
-
- if translate is not None:
- assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
- "translate should be a list or tuple and it must be of length 2."
- for t in translate:
- if not (0.0 <= t <= 1.0):
- raise ValueError("translation values should be between 0 and 1")
- self.translate = translate
-
- if scale is not None:
- assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
- "scale should be a list or tuple and it must be of length 2."
- for s in scale:
- if s <= 0:
- raise ValueError("scale values should be positive")
- self.scale = scale
-
- if shear is not None:
- if isinstance(shear, numbers.Number):
- if shear < 0:
- raise ValueError("If shear is a single number, it must be positive.")
- self.shear = [shear]
- else:
- assert isinstance(shear, (tuple, list)) and (len(shear) == 2), \
- "shear should be a list or tuple and it must be of length 2."
- self.shear = shear
- else:
- self.shear = shear
-
- def _get_inverse_affine_matrix(self, center, angle, translate, scale, shear):
- # https://github.com/pytorch/vision/blob/v0.4.0/torchvision/transforms/functional.py#L717
- from numpy import sin, cos, tan
-
- if isinstance(shear, numbers.Number):
- shear = [shear, 0]
-
- if not (isinstance(shear, (tuple, list)) and len(shear) == 2):
- raise ValueError(
- "Shear should be a single value or a tuple/list containing " +
- "two values. Got {}".format(shear))
-
- rot = math.radians(angle)
- sx, sy = [math.radians(s) for s in shear]
-
- cx, cy = center
- tx, ty = translate
-
- # RSS without scaling
- a = cos(rot - sy) / cos(sy)
- b = -cos(rot - sy) * tan(sx) / cos(sy) - sin(rot)
- c = sin(rot - sy) / cos(sy)
- d = -sin(rot - sy) * tan(sx) / cos(sy) + cos(rot)
-
- # Inverted rotation matrix with scale and shear
- # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1
- M = [d, -b, 0,
- -c, a, 0]
- M = [x / scale for x in M]
-
- # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1
- M[2] += M[0] * (-cx - tx) + M[1] * (-cy - ty)
- M[5] += M[3] * (-cx - tx) + M[4] * (-cy - ty)
-
- # Apply center translation: C * RSS^-1 * C^-1 * T^-1
- M[2] += cx
- M[5] += cy
- return M
-
- @staticmethod
- def get_params(degrees, translate, scale_ranges, shears, height):
- angle = sample_sym(degrees)
- if translate is not None:
- max_dx = translate[0] * height
- max_dy = translate[1] * height
- translations = (np.round(sample_sym(max_dx)), np.round(sample_sym(max_dy)))
- else:
- translations = (0, 0)
-
- if scale_ranges is not None:
- scale = sample_uniform(scale_ranges[0], scale_ranges[1])
- else:
- scale = 1.0
-
- if shears is not None:
- if len(shears) == 1:
- shear = [sample_sym(shears[0]), 0.]
- elif len(shears) == 2:
- shear = [sample_sym(shears[0]), sample_sym(shears[1])]
- else:
- shear = 0.0
-
- return angle, translations, scale, shear
-
-
- def __call__(self, img):
- src_h, src_w = img.shape[:2]
- angle, translate, scale, shear = self.get_params(
- self.degrees, self.translate, self.scale, self.shear, src_h)
-
- M = self._get_inverse_affine_matrix((src_w/2, src_h/2), angle, (0, 0), scale, shear)
- M = np.array(M).reshape(2,3)
-
- startpoints = [(0, 0), (src_w - 1, 0), (src_w - 1, src_h - 1), (0, src_h - 1)]
- project = lambda x, y, a, b, c: int(a*x + b*y + c)
- endpoints = [(project(x, y, *M[0]), project(x, y, *M[1])) for x, y in startpoints]
-
- rect = cv2.minAreaRect(np.array(endpoints))
- bbox = cv2.boxPoints(rect).astype(np.int32)  # np.int was removed in NumPy 1.24
- max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
- min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
-
- dst_w = int(max_x - min_x)
- dst_h = int(max_y - min_y)
- M[0, 2] += (dst_w - src_w) / 2
- M[1, 2] += (dst_h - src_h) / 2
-
- # add translate
- dst_w += int(abs(translate[0]))
- dst_h += int(abs(translate[1]))
- if translate[0] < 0: M[0, 2] += abs(translate[0])
- if translate[1] < 0: M[1, 2] += abs(translate[1])
-
- flags = get_interpolation()
- return cv2.warpAffine(img, M, (dst_w , dst_h), flags=flags, borderMode=cv2.BORDER_REPLICATE)
-
-class CVRandomPerspective(object):
- def __init__(self, distortion=0.5):
- self.distortion = distortion
-
- def get_params(self, width, height, distortion):
- offset_h = sample_asym(distortion * height / 2, size=4).astype(np.int32)  # np.int was removed in NumPy 1.24
- offset_w = sample_asym(distortion * width / 2, size=4).astype(np.int32)
- topleft = ( offset_w[0], offset_h[0])
- topright = (width - 1 - offset_w[1], offset_h[1])
- botright = (width - 1 - offset_w[2], height - 1 - offset_h[2])
- botleft = ( offset_w[3], height - 1 - offset_h[3])
-
- startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
- endpoints = [topleft, topright, botright, botleft]
- return np.array(startpoints, dtype=np.float32), np.array(endpoints, dtype=np.float32)
-
- def __call__(self, img):
- height, width = img.shape[:2]
- startpoints, endpoints = self.get_params(width, height, self.distortion)
- M = cv2.getPerspectiveTransform(startpoints, endpoints)
-
- # TODO: more robust way to crop image
- rect = cv2.minAreaRect(endpoints)
- bbox = cv2.boxPoints(rect).astype(np.int32)  # np.int was removed in NumPy 1.24
- max_x, max_y = bbox[:, 0].max(), bbox[:, 1].max()
- min_x, min_y = bbox[:, 0].min(), bbox[:, 1].min()
- min_x, min_y = max(min_x, 0), max(min_y, 0)
-
- flags = get_interpolation()
- img = cv2.warpPerspective(img, M, (max_x, max_y), flags=flags, borderMode=cv2.BORDER_REPLICATE)
- img = img[min_y:, min_x:]
- return img
-
-class CVRescale(object):
-
- def __init__(self, factor=4, base_size=(128, 512)):
- """ Define image scales using gaussian pyramid and rescale image to target scale.
-
- Args:
- factor: the decayed factor from base size, factor=4 keeps target scale by default.
- base_size: base size the build the bottom layer of pyramid
- """
- if isinstance(factor, numbers.Number):
- self.factor = round(sample_uniform(0, factor))
- elif isinstance(factor, (tuple, list)) and len(factor) == 2:
- self.factor = round(sample_uniform(factor[0], factor[1]))
- else:
- raise Exception('factor must be a number or a list of length 2')
- # assert factor is valid
- self.base_h, self.base_w = base_size[:2]
-
- def __call__(self, img):
- if self.factor == 0: return img
- src_h, src_w = img.shape[:2]
- cur_w, cur_h = self.base_w, self.base_h
- scale_img = cv2.resize(img, (cur_w, cur_h), interpolation=get_interpolation())
- for _ in range(self.factor):
- scale_img = cv2.pyrDown(scale_img)
- scale_img = cv2.resize(scale_img, (src_w, src_h), interpolation=get_interpolation())
- return scale_img
-
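A standalone sketch of the pyramid degradation `CVRescale` performs, with illustrative sizes:

```python
import cv2
import numpy as np

# Resize to a base size, downsample `factor` times with pyrDown (each step
# halves the resolution), then resize back, discarding high-frequency detail.
img = np.random.randint(0, 256, (32, 100, 3), dtype=np.uint8)  # toy text crop
base_h, base_w, factor = 128, 512, 2
scale_img = cv2.resize(img, (base_w, base_h))
for _ in range(factor):
    scale_img = cv2.pyrDown(scale_img)        # 512x128 -> 256x64 -> 128x32
degraded = cv2.resize(scale_img, (100, 32))   # back to the source size
```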
-class CVGaussianNoise(object):
- def __init__(self, mean=0, var=20):
- self.mean = mean
- if isinstance(var, numbers.Number):
- self.var = max(int(sample_asym(var)), 1)
- elif isinstance(var, (tuple, list)) and len(var) == 2:
- self.var = int(sample_uniform(var[0], var[1]))
- else:
- raise Exception('var must be a number or a list of length 2')
-
- def __call__(self, img):
- noise = np.random.normal(self.mean, self.var**0.5, img.shape)
- img = np.clip(img + noise, 0, 255).astype(np.uint8)
- return img
-
-class CVMotionBlur(object):
- def __init__(self, degrees=12, angle=90):
- if isinstance(degrees, numbers.Number):
- self.degree = max(int(sample_asym(degrees)), 1)
- elif isinstance(degrees, (tuple, list)) and len(degrees) == 2:
- self.degree = int(sample_uniform(degrees[0], degrees[1]))
- else:
- raise Exception('degrees must be a number or a list of length 2')
- self.angle = sample_uniform(-angle, angle)
-
- def __call__(self, img):
- M = cv2.getRotationMatrix2D((self.degree // 2, self.degree // 2), self.angle, 1)
- motion_blur_kernel = np.zeros((self.degree, self.degree))
- motion_blur_kernel[self.degree // 2, :] = 1
- motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, (self.degree, self.degree))
- motion_blur_kernel = motion_blur_kernel / self.degree
- img = cv2.filter2D(img, -1, motion_blur_kernel)
- img = np.clip(img, 0, 255).astype(np.uint8)
- return img
-
-class CVGeometry(object):
- def __init__(self, degrees=15, translate=(0.3, 0.3), scale=(0.5, 2.),
- shear=(45, 15), distortion=0.5, p=0.5):
- self.p = p
- type_p = random.random()
- if type_p < 0.33:
- self.transforms = CVRandomRotation(degrees=degrees)
- elif type_p < 0.66:
- self.transforms = CVRandomAffine(degrees=degrees, translate=translate, scale=scale, shear=shear)
- else:
- self.transforms = CVRandomPerspective(distortion=distortion)
-
- def __call__(self, img):
- if random.random() < self.p:
- img = np.array(img)
- return Image.fromarray(self.transforms(img))
- else: return img
-
-class CVDeterioration(object):
- def __init__(self, var, degrees, factor, p=0.5):
- self.p = p
- transforms = []
- if var is not None:
- transforms.append(CVGaussianNoise(var=var))
- if degrees is not None:
- transforms.append(CVMotionBlur(degrees=degrees))
- if factor is not None:
- transforms.append(CVRescale(factor=factor))
-
- random.shuffle(transforms)
- transforms = Compose(transforms)
- self.transforms = transforms
-
- def __call__(self, img):
- if random.random() < self.p:
- img = np.array(img)
- return Image.fromarray(self.transforms(img))
- else: return img
-
-
-class CVColorJitter(object):
- def __init__(self, brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1, p=0.5):
- self.p = p
- self.transforms = transforms.ColorJitter(brightness=brightness, contrast=contrast,
- saturation=saturation, hue=hue)
-
- def __call__(self, img):
- if random.random() < self.p: return self.transforms(img)
- else: return img
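A hypothetical pipeline sketch showing how these three wrappers are typically chained for training augmentation; the parameter values and input file are illustrative:

```python
from PIL import Image
from torchvision.transforms import Compose

augment = Compose([
    CVGeometry(degrees=45, translate=(0.0, 0.0), scale=(0.5, 2.0),
               shear=(45, 15), distortion=0.5, p=0.5),
    CVDeterioration(var=20, degrees=6, factor=4, p=0.25),
    CVColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1, p=0.25),
])
img = Image.open("word_crop.png").convert("RGB")   # hypothetical input
img = augment(img)                                 # PIL in, PIL out
```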
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-c5e2dbc1.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-c5e2dbc1.js
deleted file mode 100644
index e1e580861f1b9bf17fc80c49db8226cc19aa01ab..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-c5e2dbc1.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{E as u,L as v}from"./index-ae57ca19.js";import{s as k,t,h as S,L as w,i as z,w as x,f as R,a as U,b as _,I as T,x as V}from"./index-f90e1963.js";import"./index-3370be2a.js";import"./Blocks-f0129fcd.js";import"./Button-89624748.js";import"./BlockLabel-56db415e.js";import"./Empty-585389a4.js";import"./Copy-6cd42558.js";import"./Download-fdaaf5d4.js";const Y=94,g=1,C=95,Z=96,f=2,$=[9,10,11,12,13,32,133,160,5760,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8232,8233,8239,8287,12288],G=58,N=40,X=95,q=91,c=45,E=46,j=35,D=37;function p(e){return e>=65&&e<=90||e>=97&&e<=122||e>=161}function I(e){return e>=48&&e<=57}const B=new u((e,o)=>{for(let r=!1,a=0,O=0;;O++){let{next:l}=e;if(p(l)||l==c||l==X||r&&I(l))!r&&(l!=c||O>0)&&(r=!0),a===O&&l==c&&a++,e.advance();else{r&&e.acceptToken(l==N?C:a==2&&o.canShift(f)?f:Z);break}}}),A=new u(e=>{if($.includes(e.peek(-1))){let{next:o}=e;(p(o)||o==X||o==j||o==E||o==q||o==G||o==c)&&e.acceptToken(Y)}}),F=new u(e=>{if(!$.includes(e.peek(-1))){let{next:o}=e;if(o==D&&(e.advance(),e.acceptToken(g)),p(o)){do e.advance();while(p(e.next));e.acceptToken(g)}}}),L=k({"AtKeyword import charset namespace keyframes media supports":t.definitionKeyword,"from to selector":t.keyword,NamespaceName:t.namespace,KeyframeName:t.labelName,TagName:t.tagName,ClassName:t.className,PseudoClassName:t.constant(t.className),IdName:t.labelName,"FeatureName PropertyName":t.propertyName,AttributeName:t.attributeName,NumberLiteral:t.number,KeywordQuery:t.keyword,UnaryQueryOp:t.operatorKeyword,"CallTag ValueName":t.atom,VariableName:t.variableName,Callee:t.operatorKeyword,Unit:t.unit,"UniversalSelector NestingSelector":t.definitionOperator,MatchOp:t.compareOperator,"ChildOp SiblingOp, LogicOp":t.logicOperator,BinOp:t.arithmeticOperator,Important:t.modifier,Comment:t.blockComment,ParenthesizedContent:t.special(t.name),ColorLiteral:t.color,StringLiteral:t.string,":":t.punctuation,"PseudoOp #":t.derefOperator,"; ,":t.separator,"( )":t.paren,"[ ]":t.squareBracket,"{ 
}":t.brace}),K={__proto__:null,lang:32,"nth-child":32,"nth-last-child":32,"nth-of-type":32,"nth-last-of-type":32,dir:32,"host-context":32,url:60,"url-prefix":60,domain:60,regexp:60,selector:134},J={__proto__:null,"@import":114,"@media":138,"@charset":142,"@namespace":146,"@keyframes":152,"@supports":164},H={__proto__:null,not:128,only:128,from:158,to:160},M=v.deserialize({version:14,states:"7WQYQ[OOO#_Q[OOOOQP'#Cd'#CdOOQP'#Cc'#CcO#fQ[O'#CfO$YQXO'#CaO$aQ[O'#ChO$lQ[O'#DPO$qQ[O'#DTOOQP'#Ed'#EdO$vQdO'#DeO%bQ[O'#DrO$vQdO'#DtO%sQ[O'#DvO&OQ[O'#DyO&TQ[O'#EPO&cQ[O'#EROOQS'#Ec'#EcOOQS'#ET'#ETQYQ[OOO&jQXO'#CdO'_QWO'#DaO'dQWO'#EjO'oQ[O'#EjQOQWOOOOQP'#Cg'#CgOOQP,59Q,59QO#fQ[O,59QO'yQ[O'#EWO(eQWO,58{O(mQ[O,59SO$lQ[O,59kO$qQ[O,59oO'yQ[O,59sO'yQ[O,59uO'yQ[O,59vO(xQ[O'#D`OOQS,58{,58{OOQP'#Ck'#CkOOQO'#C}'#C}OOQP,59S,59SO)PQWO,59SO)UQWO,59SOOQP'#DR'#DROOQP,59k,59kOOQO'#DV'#DVO)ZQ`O,59oOOQS'#Cp'#CpO$vQdO'#CqO)cQvO'#CsO*pQtO,5:POOQO'#Cx'#CxO)UQWO'#CwO+UQWO'#CyOOQS'#Eg'#EgOOQO'#Dh'#DhO+ZQ[O'#DoO+iQWO'#EkO&TQ[O'#DmO+wQWO'#DpOOQO'#El'#ElO(hQWO,5:^O+|QpO,5:`OOQS'#Dx'#DxO,UQWO,5:bO,ZQ[O,5:bOOQO'#D{'#D{O,cQWO,5:eO,hQWO,5:kO,pQWO,5:mOOQS-E8R-E8RO$vQdO,59{O,xQ[O'#EYO-VQWO,5;UO-VQWO,5;UOOQP1G.l1G.lO-|QXO,5:rOOQO-E8U-E8UOOQS1G.g1G.gOOQP1G.n1G.nO)PQWO1G.nO)UQWO1G.nOOQP1G/V1G/VO.ZQ`O1G/ZO.tQXO1G/_O/[QXO1G/aO/rQXO1G/bO0YQWO,59zO0_Q[O'#DOO0fQdO'#CoOOQP1G/Z1G/ZO$vQdO1G/ZO0mQpO,59]OOQS,59_,59_O$vQdO,59aO0uQWO1G/kOOQS,59c,59cO0zQ!bO,59eO1SQWO'#DhO1_QWO,5:TO1dQWO,5:ZO&TQ[O,5:VO&TQ[O'#EZO1lQWO,5;VO1wQWO,5:XO'yQ[O,5:[OOQS1G/x1G/xOOQS1G/z1G/zOOQS1G/|1G/|O2YQWO1G/|O2_QdO'#D|OOQS1G0P1G0POOQS1G0V1G0VOOQS1G0X1G0XO2mQtO1G/gOOQO,5:t,5:tO3TQ[O,5:tOOQO-E8W-E8WO3bQWO1G0pOOQP7+$Y7+$YOOQP7+$u7+$uO$vQdO7+$uOOQS1G/f1G/fO3mQXO'#EiO3tQWO,59jO3yQtO'#EUO4nQdO'#EfO4xQWO,59ZO4}QpO7+$uOOQS1G.w1G.wOOQS1G.{1G.{OOQS7+%V7+%VO5VQWO1G/PO$vQdO1G/oOOQO1G/u1G/uOOQO1G/q1G/qO5[QWO,5:uOOQO-E8X-E8XO5jQXO1G/vOOQS7+%h7+%hO5qQYO'#CsO(hQWO'#E[O5yQdO,5:hOOQS,5:h,5:hO6XQtO'#EXO$vQdO'#EXO7VQdO7+%ROOQO7+%R7+%ROOQO1G0`1G0`O7jQpO<T![;'S%^;'S;=`%o<%lO%^^;TUoWOy%^z!Q%^!Q![;g![;'S%^;'S;=`%o<%lO%^^;nYoW#[UOy%^z!Q%^!Q![;g![!g%^!g!h<^!h#X%^#X#Y<^#Y;'S%^;'S;=`%o<%lO%^^[[oW#[UOy%^z!O%^!O!P;g!P!Q%^!Q![>T![!g%^!g!h<^!h#X%^#X#Y<^#Y;'S%^;'S;=`%o<%lO%^_?VSpVOy%^z;'S%^;'S;=`%o<%lO%^^?hWjSOy%^z!O%^!O!P;O!P!Q%^!Q![>T![;'S%^;'S;=`%o<%lO%^_@VU#XPOy%^z!Q%^!Q![;g![;'S%^;'S;=`%o<%lO%^~@nTjSOy%^z{@}{;'S%^;'S;=`%o<%lO%^~ASUoWOy@}yzAfz{Bm{;'S@};'S;=`Co<%lO@}~AiTOzAfz{Ax{;'SAf;'S;=`Bg<%lOAf~A{VOzAfz{Ax{!PAf!P!QBb!Q;'SAf;'S;=`Bg<%lOAf~BgOR~~BjP;=`<%lAf~BrWoWOy@}yzAfz{Bm{!P@}!P!QC[!Q;'S@};'S;=`Co<%lO@}~CcSoWR~Oy%^z;'S%^;'S;=`%o<%lO%^~CrP;=`<%l@}^Cz[#[UOy%^z!O%^!O!P;g!P!Q%^!Q![>T![!g%^!g!h<^!h#X%^#X#Y<^#Y;'S%^;'S;=`%o<%lO%^XDuU]POy%^z![%^![!]EX!];'S%^;'S;=`%o<%lO%^XE`S^PoWOy%^z;'S%^;'S;=`%o<%lO%^_EqS!WVOy%^z;'S%^;'S;=`%o<%lO%^YFSSzQOy%^z;'S%^;'S;=`%o<%lO%^XFeU|POy%^z!`%^!`!aFw!a;'S%^;'S;=`%o<%lO%^XGOS|PoWOy%^z;'S%^;'S;=`%o<%lO%^XG_WOy%^z!c%^!c!}Gw!}#T%^#T#oGw#o;'S%^;'S;=`%o<%lO%^XHO[!YPoWOy%^z}%^}!OGw!O!Q%^!Q![Gw![!c%^!c!}Gw!}#T%^#T#oGw#o;'S%^;'S;=`%o<%lO%^XHySxPOy%^z;'S%^;'S;=`%o<%lO%^^I[SvUOy%^z;'S%^;'S;=`%o<%lO%^XIkUOy%^z#b%^#b#cI}#c;'S%^;'S;=`%o<%lO%^XJSUoWOy%^z#W%^#W#XJf#X;'S%^;'S;=`%o<%lO%^XJmS!`PoWOy%^z;'S%^;'S;=`%o<%lO%^XJ|UOy%^z#f%^#f#gJf#g;'S%^;'S;=`%o<%lO%^XKeS!RPOy%^z;'S%^;'S;=`%o<%lO%^_KvS!QVOy%^z;'S%^;'S;=`%o<%lO%^ZLXU!PPOy%^z!_%^!_!`6y!`;'S%^;'S;=`%o<%lO%^WLnP;=`<%l$}",tokenizers:[A,F,B,0,1,2,3],topRules:{StyleSheet:[0,4],Styles:[1,84]},specialized:[{term:95,get:e=>K[e]||-1},{term:56,get:e=>J[e]||-1},{term:96,get:e=>H[e]||-1}],tokenPrec:1123});let Q=null;function m(){if(!Q&&typeof 
document=="object"&&document.body){let{style:e}=document.body,o=[],r=new Set;for(let a in e)a!="cssText"&&a!="cssFloat"&&typeof e[a]=="string"&&(/[A-Z]/.test(a)&&(a=a.replace(/[A-Z]/g,O=>"-"+O.toLowerCase())),r.has(a)||(o.push(a),r.add(a)));Q=o.sort().map(a=>({type:"property",label:a}))}return Q||[]}const h=["active","after","any-link","autofill","backdrop","before","checked","cue","default","defined","disabled","empty","enabled","file-selector-button","first","first-child","first-letter","first-line","first-of-type","focus","focus-visible","focus-within","fullscreen","has","host","host-context","hover","in-range","indeterminate","invalid","is","lang","last-child","last-of-type","left","link","marker","modal","not","nth-child","nth-last-child","nth-last-of-type","nth-of-type","only-child","only-of-type","optional","out-of-range","part","placeholder","placeholder-shown","read-only","read-write","required","right","root","scope","selection","slotted","target","target-text","valid","visited","where"].map(e=>({type:"class",label:e})),b=["above","absolute","activeborder","additive","activecaption","after-white-space","ahead","alias","all","all-scroll","alphabetic","alternate","always","antialiased","appworkspace","asterisks","attr","auto","auto-flow","avoid","avoid-column","avoid-page","avoid-region","axis-pan","background","backwards","baseline","below","bidi-override","blink","block","block-axis","bold","bolder","border","border-box","both","bottom","break","break-all","break-word","bullets","button","button-bevel","buttonface","buttonhighlight","buttonshadow","buttontext","calc","capitalize","caps-lock-indicator","caption","captiontext","caret","cell","center","checkbox","circle","cjk-decimal","clear","clip","close-quote","col-resize","collapse","color","color-burn","color-dodge","column","column-reverse","compact","condensed","contain","content","contents","content-box","context-menu","continuous","copy","counter","counters","cover","crop","cross","crosshair","currentcolor","cursive","cyclic","darken","dashed","decimal","decimal-leading-zero","default","default-button","dense","destination-atop","destination-in","destination-out","destination-over","difference","disc","discard","disclosure-closed","disclosure-open","document","dot-dash","dot-dot-dash","dotted","double","down","e-resize","ease","ease-in","ease-in-out","ease-out","element","ellipse","ellipsis","embed","end","ethiopic-abegede-gez","ethiopic-halehame-aa-er","ethiopic-halehame-gez","ew-resize","exclusion","expanded","extends","extra-condensed","extra-expanded","fantasy","fast","fill","fill-box","fixed","flat","flex","flex-end","flex-start","footnotes","forwards","from","geometricPrecision","graytext","grid","groove","hand","hard-light","help","hidden","hide","higher","highlight","highlighttext","horizontal","hsl","hsla","hue","icon","ignore","inactiveborder","inactivecaption","inactivecaptiontext","infinite","infobackground","infotext","inherit","initial","inline","inline-axis","inline-block","inline-flex","inline-grid","inline-table","inset","inside","intrinsic","invert","italic","justify","keep-all","landscape","large","larger","left","level","lighter","lighten","line-through","linear","linear-gradient","lines","list-item","listbox","listitem","local","logical","loud","lower","lower-hexadecimal","lower-latin","lower-norwegian","lowercase","ltr","luminosity","manipulation","match","matrix","matrix3d","medium","menu","menutext","message-box","middle","min-intrinsic","mix","monospace","move","multiple","multiple_mask_images","mult
iply","n-resize","narrower","ne-resize","nesw-resize","no-close-quote","no-drop","no-open-quote","no-repeat","none","normal","not-allowed","nowrap","ns-resize","numbers","numeric","nw-resize","nwse-resize","oblique","opacity","open-quote","optimizeLegibility","optimizeSpeed","outset","outside","outside-shape","overlay","overline","padding","padding-box","painted","page","paused","perspective","pinch-zoom","plus-darker","plus-lighter","pointer","polygon","portrait","pre","pre-line","pre-wrap","preserve-3d","progress","push-button","radial-gradient","radio","read-only","read-write","read-write-plaintext-only","rectangle","region","relative","repeat","repeating-linear-gradient","repeating-radial-gradient","repeat-x","repeat-y","reset","reverse","rgb","rgba","ridge","right","rotate","rotate3d","rotateX","rotateY","rotateZ","round","row","row-resize","row-reverse","rtl","run-in","running","s-resize","sans-serif","saturation","scale","scale3d","scaleX","scaleY","scaleZ","screen","scroll","scrollbar","scroll-position","se-resize","self-start","self-end","semi-condensed","semi-expanded","separate","serif","show","single","skew","skewX","skewY","skip-white-space","slide","slider-horizontal","slider-vertical","sliderthumb-horizontal","sliderthumb-vertical","slow","small","small-caps","small-caption","smaller","soft-light","solid","source-atop","source-in","source-out","source-over","space","space-around","space-between","space-evenly","spell-out","square","start","static","status-bar","stretch","stroke","stroke-box","sub","subpixel-antialiased","svg_masks","super","sw-resize","symbolic","symbols","system-ui","table","table-caption","table-cell","table-column","table-column-group","table-footer-group","table-header-group","table-row","table-row-group","text","text-bottom","text-top","textarea","textfield","thick","thin","threeddarkshadow","threedface","threedhighlight","threedlightshadow","threedshadow","to","top","transform","translate","translate3d","translateX","translateY","translateZ","transparent","ultra-condensed","ultra-expanded","underline","unidirectional-pan","unset","up","upper-latin","uppercase","url","var","vertical","vertical-text","view-box","visible","visibleFill","visiblePainted","visibleStroke","visual","w-resize","wait","wave","wider","window","windowframe","windowtext","words","wrap","wrap-reverse","x-large","x-small","xor","xx-large","xx-small"].map(e=>({type:"keyword",label:e})).concat(["aliceblue","antiquewhite","aqua","aquamarine","azure","beige","bisque","black","blanchedalmond","blue","blueviolet","brown","burlywood","cadetblue","chartreuse","chocolate","coral","cornflowerblue","cornsilk","crimson","cyan","darkblue","darkcyan","darkgoldenrod","darkgray","darkgreen","darkkhaki","darkmagenta","darkolivegreen","darkorange","darkorchid","darkred","darksalmon","darkseagreen","darkslateblue","darkslategray","darkturquoise","darkviolet","deeppink","deepskyblue","dimgray","dodgerblue","firebrick","floralwhite","forestgreen","fuchsia","gainsboro","ghostwhite","gold","goldenrod","gray","grey","green","greenyellow","honeydew","hotpink","indianred","indigo","ivory","khaki","lavender","lavenderblush","lawngreen","lemonchiffon","lightblue","lightcoral","lightcyan","lightgoldenrodyellow","lightgray","lightgreen","lightpink","lightsalmon","lightseagreen","lightskyblue","lightslategray","lightsteelblue","lightyellow","lime","limegreen","linen","magenta","maroon","mediumaquamarine","mediumblue","mediumorchid","mediumpurple","mediumseagreen","mediumslateblue","mediumspringgreen","mediumturquoi
se","mediumvioletred","midnightblue","mintcream","mistyrose","moccasin","navajowhite","navy","oldlace","olive","olivedrab","orange","orangered","orchid","palegoldenrod","palegreen","paleturquoise","palevioletred","papayawhip","peachpuff","peru","pink","plum","powderblue","purple","rebeccapurple","red","rosybrown","royalblue","saddlebrown","salmon","sandybrown","seagreen","seashell","sienna","silver","skyblue","slateblue","slategray","snow","springgreen","steelblue","tan","teal","thistle","tomato","turquoise","violet","wheat","white","whitesmoke","yellow","yellowgreen"].map(e=>({type:"constant",label:e}))),ee=["a","abbr","address","article","aside","b","bdi","bdo","blockquote","body","br","button","canvas","caption","cite","code","col","colgroup","dd","del","details","dfn","dialog","div","dl","dt","em","figcaption","figure","footer","form","header","hgroup","h1","h2","h3","h4","h5","h6","hr","html","i","iframe","img","input","ins","kbd","label","legend","li","main","meter","nav","ol","output","p","pre","ruby","section","select","small","source","span","strong","sub","summary","sup","table","tbody","td","template","textarea","tfoot","th","thead","tr","u","ul"].map(e=>({type:"type",label:e})),n=/^(\w[\w-]*|-\w[\w-]*|)$/,ae=/^-(-[\w-]*)?$/;function Oe(e,o){var r;if((e.name=="("||e.type.isError)&&(e=e.parent||e),e.name!="ArgList")return!1;let a=(r=e.parent)===null||r===void 0?void 0:r.firstChild;return a?.name!="Callee"?!1:o.sliceString(a.from,a.to)=="var"}const y=new V,te=["Declaration"];function W(e,o){if(o.to-o.from>4096){let r=y.get(o);if(r)return r;let a=[],O=new Set,l=o.cursor(T.IncludeAnonymous);if(l.firstChild())do for(let i of W(e,l.node))O.has(i.label)||(O.add(i.label),a.push(i));while(l.nextSibling());return y.set(o,a),a}else{let r=[],a=new Set;return o.cursor().iterate(O=>{var l;if(O.name=="VariableName"&&O.matchContext(te)&&((l=O.node.nextSibling)===null||l===void 0?void 0:l.name)==":"){let i=e.sliceString(O.from,O.to);a.has(i)||(a.add(i),r.push({label:i,type:"variable"}))}}),r}}const oe=e=>{var o;let{state:r,pos:a}=e,O=S(r).resolveInner(a,-1),l=O.type.isError&&O.from==O.to-1&&r.doc.sliceString(O.from,O.to)=="-";if(O.name=="PropertyName"||l&&((o=O.parent)===null||o===void 0?void 0:o.name)=="Block")return{from:O.from,options:m(),validFor:n};if(O.name=="ValueName")return{from:O.from,options:b,validFor:n};if(O.name=="PseudoClassName")return{from:O.from,options:h,validFor:n};if(O.name=="VariableName"||(e.explicit||l)&&Oe(O,r.doc))return{from:O.name=="VariableName"?O.from:a,options:W(r.doc,S(r).topNode),validFor:ae};if(O.name=="TagName"){for(let{parent:d}=O;d;d=d.parent)if(d.name=="Block")return{from:O.from,options:m(),validFor:n};return{from:O.from,options:ee,validFor:n}}if(!e.explicit)return null;let i=O.resolve(a),s=i.childBefore(a);return s&&s.name==":"&&i.name=="PseudoClassSelector"?{from:a,options:h,validFor:n}:s&&s.name==":"&&i.name=="Declaration"||i.name=="ArgList"?{from:a,options:b,validFor:n}:i.name=="Block"?{from:a,options:m(),validFor:n}:null},P=w.define({name:"css",parser:M.configure({props:[z.add({Declaration:x()}),R.add({Block:U})]}),languageData:{commentTokens:{block:{open:"/*",close:"*/"}},indentOnInput:/^\s*\}$/,wordChars:"-"}});function me(){return new _(P,P.data.of({autocomplete:oe}))}export{me as css,oe as cssCompletionSource,P as cssLanguage};
-//# sourceMappingURL=index-c5e2dbc1.js.map
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/delete_cache.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/delete_cache.py
deleted file mode 100644
index 1912c84cbc494c648572f91107cda5d5973680d8..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/commands/delete_cache.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# coding=utf-8
-# Copyright 2022-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Contains command to delete some revisions from the HF cache directory.
-
-Usage:
- huggingface-cli delete-cache
- huggingface-cli delete-cache --disable-tui
- huggingface-cli delete-cache --dir ~/.cache/huggingface/hub
-
-NOTE:
- This command is based on `InquirerPy` to build the multiselect menu in the terminal.
- This dependency has to be installed with `pip install huggingface_hub[cli]`. Since
-    we want to avoid cross-platform issues as much as possible, I chose a library
-    built on top of `python-prompt-toolkit`, which seems to be a reference for
-    terminal GUIs (actively maintained on both Unix and Windows, 7.9k stars).
-
- For the moment, the TUI feature is in beta.
-
- See:
- - https://github.com/kazhala/InquirerPy
- - https://inquirerpy.readthedocs.io/en/latest/
- - https://github.com/prompt-toolkit/python-prompt-toolkit
-
- Other solutions could have been:
- - `simple_term_menu`: would be good as well for our use case but some issues suggest
- that Windows is less supported.
- See: https://github.com/IngoMeyer441/simple-term-menu
- - `PyInquirer`: very similar to `InquirerPy` but older and not maintained anymore.
-      In particular, it has no support for Python 3.10.
- See: https://github.com/CITGuru/PyInquirer
- - `pick` (or `pickpack`): easy to use and flexible but built on top of Python's
- standard library `curses` that is specific to Unix (not implemented on Windows).
- See https://github.com/wong2/pick and https://github.com/anafvana/pickpack.
-    - `inquirer`: a lot of traction (700 stars) but explicitly states "experimental
- support of Windows". Not built on top of `python-prompt-toolkit`.
- See https://github.com/magmax/python-inquirer
-
-TODO: add support for `huggingface-cli delete-cache aaaaaa bbbbbb cccccc (...)` ?
-TODO: add "--keep-last" arg to delete revisions that are not on `main` ref
-TODO: add "--filter" arg to filter repositories by name ?
-TODO: add "--sort" arg to sort by size ?
-TODO: add "--limit" arg to limit to X repos ?
-TODO: add "-y" arg for immediate deletion ?
-See discussions in https://github.com/huggingface/huggingface_hub/issues/1025.
-"""
-import os
-from argparse import _SubParsersAction
-from functools import wraps
-from tempfile import mkstemp
-from typing import Any, Callable, Iterable, List, Optional, Union
-
-from ..utils import CachedRepoInfo, CachedRevisionInfo, HFCacheInfo, scan_cache_dir
-from . import BaseHuggingfaceCLICommand
-from ._cli_utils import ANSI
-
-
-try:
- from InquirerPy import inquirer
- from InquirerPy.base.control import Choice
- from InquirerPy.separator import Separator
-
- _inquirer_py_available = True
-except ImportError:
- _inquirer_py_available = False
-
-
-def require_inquirer_py(fn: Callable) -> Callable:
- """Decorator to flag methods that require `InquirerPy`."""
-
- # TODO: refactor this + imports in a unified pattern across codebase
- @wraps(fn)
- def _inner(*args, **kwargs):
- if not _inquirer_py_available:
- raise ImportError(
- "The `delete-cache` command requires extra dependencies to work with"
- " the TUI.\nPlease run `pip install huggingface_hub[cli]` to install"
- " them.\nOtherwise, disable TUI using the `--disable-tui` flag."
- )
-
- return fn(*args, **kwargs)
-
- return _inner
-
-
-# Possibility for the user to cancel deletion
-_CANCEL_DELETION_STR = "CANCEL_DELETION"
-
-
-class DeleteCacheCommand(BaseHuggingfaceCLICommand):
- @staticmethod
- def register_subcommand(parser: _SubParsersAction):
- delete_cache_parser = parser.add_parser("delete-cache", help="Delete revisions from the cache directory.")
-
- delete_cache_parser.add_argument(
- "--dir",
- type=str,
- default=None,
- help="cache directory (optional). Default to the default HuggingFace cache.",
- )
-
- delete_cache_parser.add_argument(
- "--disable-tui",
- action="store_true",
- help=(
- "Disable Terminal User Interface (TUI) mode. Useful if your"
- " platform/terminal doesn't support the multiselect menu."
- ),
- )
-
- delete_cache_parser.set_defaults(func=DeleteCacheCommand)
-
- def __init__(self, args):
- self.cache_dir: Optional[str] = args.dir
- self.disable_tui: bool = args.disable_tui
-
- def run(self):
- """Run `delete-cache` command with or without TUI."""
- # Scan cache directory
- hf_cache_info = scan_cache_dir(self.cache_dir)
-
- # Manual review from the user
- if self.disable_tui:
- selected_hashes = _manual_review_no_tui(hf_cache_info, preselected=[])
- else:
- selected_hashes = _manual_review_tui(hf_cache_info, preselected=[])
-
- # If deletion is not cancelled
- if len(selected_hashes) > 0 and _CANCEL_DELETION_STR not in selected_hashes:
- confirm_message = _get_expectations_str(hf_cache_info, selected_hashes) + " Confirm deletion ?"
-
- # Confirm deletion
- if self.disable_tui:
- confirmed = _ask_for_confirmation_no_tui(confirm_message)
- else:
- confirmed = _ask_for_confirmation_tui(confirm_message)
-
- # Deletion is confirmed
- if confirmed:
- strategy = hf_cache_info.delete_revisions(*selected_hashes)
- print("Start deletion.")
- strategy.execute()
- print(
- f"Done. Deleted {len(strategy.repos)} repo(s) and"
- f" {len(strategy.snapshots)} revision(s) for a total of"
- f" {strategy.expected_freed_size_str}."
- )
- return
-
- # Deletion is cancelled
- print("Deletion is cancelled. Do nothing.")
-
-
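-# A minimal programmatic sketch of the flow implemented by `run()` above, using
-# only the public `scan_cache_dir` API that this command builds on (the revision
-# hash below is hypothetical):
-#
-#     from huggingface_hub import scan_cache_dir
-#
-#     hf_cache_info = scan_cache_dir()                       # scan the default cache
-#     strategy = hf_cache_info.delete_revisions("abcdef12")  # plan the deletion
-#     print(f"Will free {strategy.expected_freed_size_str}.")
-#     strategy.execute()                                     # actually delete
-
-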
-@require_inquirer_py
-def _manual_review_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]:
- """Ask the user for a manual review of the revisions to delete.
-
- Displays a multi-select menu in the terminal (TUI).
- """
- # Define multiselect list
- choices = _get_tui_choices_from_scan(repos=hf_cache_info.repos, preselected=preselected)
- checkbox = inquirer.checkbox(
- message="Select revisions to delete:",
- choices=choices, # List of revisions with some pre-selection
- cycle=False, # No loop between top and bottom
- height=100, # Large list if possible
- # We use the instruction to display to the user the expected effect of the
- # deletion.
- instruction=_get_expectations_str(
- hf_cache_info,
- selected_hashes=[c.value for c in choices if isinstance(c, Choice) and c.enabled],
- ),
-        # We use the long instruction to show keybinding instructions to the user
-        long_instruction="Press <space> to select, <enter> to validate and <ctrl+c> to quit without modification.",
-        # Message that is displayed once the user validates their selection.
- transformer=lambda result: f"{len(result)} revision(s) selected.",
- )
-
- # Add a callback to update the information line when a revision is
- # selected/unselected
- def _update_expectations(_) -> None:
- # Hacky way to dynamically set an instruction message to the checkbox when
- # a revision hash is selected/unselected.
- checkbox._instruction = _get_expectations_str(
- hf_cache_info,
- selected_hashes=[choice["value"] for choice in checkbox.content_control.choices if choice["enabled"]],
- )
-
- checkbox.kb_func_lookup["toggle"].append({"func": _update_expectations})
-
- # Finally display the form to the user.
- try:
- return checkbox.execute()
- except KeyboardInterrupt:
- return [] # Quit without deletion
-
-
-@require_inquirer_py
-def _ask_for_confirmation_tui(message: str, default: bool = True) -> bool:
- """Ask for confirmation using Inquirer."""
- return inquirer.confirm(message, default=default).execute()
-
-
-def _get_tui_choices_from_scan(repos: Iterable[CachedRepoInfo], preselected: List[str]) -> List:
- """Build a list of choices from the scanned repos.
-
- Args:
- repos (*Iterable[`CachedRepoInfo`]*):
- List of scanned repos on which we want to delete revisions.
- preselected (*List[`str`]*):
- List of revision hashes that will be preselected.
-
- Return:
- The list of choices to pass to `inquirer.checkbox`.
- """
- choices: List[Union[Choice, Separator]] = []
-
- # First choice is to cancel the deletion. If selected, nothing will be deleted,
- # no matter the other selected items.
- choices.append(
- Choice(
- _CANCEL_DELETION_STR,
- name="None of the following (if selected, nothing will be deleted).",
- enabled=False,
- )
- )
-
- # Display a separator per repo and a Choice for each revisions of the repo
- for repo in sorted(repos, key=_repo_sorting_order):
- # Repo as separator
- choices.append(
- Separator(
- f"\n{repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str},"
- f" used {repo.last_accessed_str})"
- )
- )
- for revision in sorted(repo.revisions, key=_revision_sorting_order):
- # Revision as choice
- choices.append(
- Choice(
- revision.commit_hash,
- name=(
- f"{revision.commit_hash[:8]}:"
- f" {', '.join(sorted(revision.refs)) or '(detached)'} #"
- f" modified {revision.last_modified_str}"
- ),
- enabled=revision.commit_hash in preselected,
- )
- )
-
- # Return choices
- return choices
-
-
-def _manual_review_no_tui(hf_cache_info: HFCacheInfo, preselected: List[str]) -> List[str]:
- """Ask the user for a manual review of the revisions to delete.
-
- Used when TUI is disabled. Manual review happens in a separate tmp file that the
- user can manually edit.
- """
- # 1. Generate temporary file with delete commands.
- fd, tmp_path = mkstemp(suffix=".txt") # suffix to make it easier to find by editors
- os.close(fd)
-
- lines = []
- for repo in sorted(hf_cache_info.repos, key=_repo_sorting_order):
- lines.append(
- f"\n# {repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str},"
- f" used {repo.last_accessed_str})"
- )
- for revision in sorted(repo.revisions, key=_revision_sorting_order):
- lines.append(
- # Deselect by prepending a '#'
- f"{'' if revision.commit_hash in preselected else '#'} "
- f" {revision.commit_hash} # Refs:"
- # Print `refs` as comment on same line
- f" {', '.join(sorted(revision.refs)) or '(detached)'} # modified"
- # Print `last_modified` as comment on same line
- f" {revision.last_modified_str}"
- )
-
- with open(tmp_path, "w") as f:
- f.write(_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS)
- f.write("\n".join(lines))
-
- # 2. Prompt instructions to user.
- instructions = f"""
- TUI is disabled. In order to select which revisions you want to delete, please edit
- the following file using the text editor of your choice. Instructions for manual
- editing are located at the beginning of the file. Edit the file, save it and confirm
- to continue.
- File to edit: {ANSI.bold(tmp_path)}
- """
- print("\n".join(line.strip() for line in instructions.strip().split("\n")))
-
- # 3. Wait for user confirmation.
- while True:
- selected_hashes = _read_manual_review_tmp_file(tmp_path)
- if _ask_for_confirmation_no_tui(
- _get_expectations_str(hf_cache_info, selected_hashes) + " Continue ?",
- default=False,
- ):
- break
-
- # 4. Return selected_hashes
- os.remove(tmp_path)
- return selected_hashes
-
-
-def _ask_for_confirmation_no_tui(message: str, default: bool = True) -> bool:
- """Ask for confirmation using pure-python."""
- YES = ("y", "yes", "1")
- NO = ("n", "no", "0")
- DEFAULT = ""
- ALL = YES + NO + (DEFAULT,)
- full_message = message + (" (Y/n) " if default else " (y/N) ")
- while True:
- answer = input(full_message).lower()
- if answer == DEFAULT:
- return default
- if answer in YES:
- return True
- if answer in NO:
- return False
- print(f"Invalid input. Must be one of {ALL}")
-
-
-def _get_expectations_str(hf_cache_info: HFCacheInfo, selected_hashes: List[str]) -> str:
- """Format a string to display to the user how much space would be saved.
-
- Example:
- ```
- >>> _get_expectations_str(hf_cache_info, selected_hashes)
- '7 revisions selected counting for 4.3G.'
- ```
- """
- if _CANCEL_DELETION_STR in selected_hashes:
- return "Nothing will be deleted."
- strategy = hf_cache_info.delete_revisions(*selected_hashes)
- return f"{len(selected_hashes)} revisions selected counting for {strategy.expected_freed_size_str}."
-
-
-def _read_manual_review_tmp_file(tmp_path: str) -> List[str]:
- """Read the manually reviewed instruction file and return a list of revision hash.
-
- Example:
- ```txt
- # This is the tmp file content
- ###
-
- # Commented out line
- 123456789 # revision hash
-
- # Something else
- # a_newer_hash # 2 days ago
- an_older_hash # 3 days ago
- ```
-
- ```py
- >>> _read_manual_review_tmp_file(tmp_path)
- ['123456789', 'an_older_hash']
- ```
- """
- with open(tmp_path) as f:
- content = f.read()
-
- # Split lines
- lines = [line.strip() for line in content.split("\n")]
-
- # Filter commented lines
- selected_lines = [line for line in lines if not line.startswith("#")]
-
- # Select only before comment
- selected_hashes = [line.split("#")[0].strip() for line in selected_lines]
-
- # Return revision hashes
- return [hash for hash in selected_hashes if len(hash) > 0]
-
-
-_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS = f"""
-# INSTRUCTIONS
-# ------------
-# This is a temporary file created by running `huggingface-cli delete-cache` with the
-# `--disable-tui` option. It contains a set of revisions that can be deleted from your
-# local cache directory.
-#
-# Please manually review the revisions you want to delete:
-# - Revision hashes can be commented out with '#'.
-# - Only non-commented revisions in this file will be deleted.
-# - Revision hashes that are removed from this file are ignored as well.
-# - If the `{_CANCEL_DELETION_STR}` line is uncommented, the whole cache deletion is cancelled and
-# no changes will be applied.
-#
-# Once you've manually reviewed this file, please confirm deletion in the terminal. This
-# file will be automatically removed once done.
-# ------------
-
-# KILL SWITCH
-# ------------
-# Un-comment following line to completely cancel the deletion process
-# {_CANCEL_DELETION_STR}
-# ------------
-
-# REVISIONS
-# ------------
-""".strip()
-
-
-def _repo_sorting_order(repo: CachedRepoInfo) -> Any:
- # First split by Dataset/Model, then sort by last accessed (oldest first)
- return (repo.repo_type, repo.last_accessed)
-
-
-def _revision_sorting_order(revision: CachedRevisionInfo) -> Any:
- # Sort by last modified (oldest first)
- return revision.last_modified
diff --git a/spaces/DataScienceGuild/ChatbotWithDataframeMemory/README.md b/spaces/DataScienceGuild/ChatbotWithDataframeMemory/README.md
deleted file mode 100644
index 4c3382a1550c04bfccd10c35c6a83c2c8e24a2cb..0000000000000000000000000000000000000000
--- a/spaces/DataScienceGuild/ChatbotWithDataframeMemory/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ChatbotWithDataframeMemory
-emoji: 🌖
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/visualization/floorplan.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/visualization/floorplan.py
deleted file mode 100644
index 2c38e9f8410d4225cc3893fe76e4ff8b96810332..0000000000000000000000000000000000000000
--- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/visualization/floorplan.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""
-@date: 2021/6/29
-@description:
-"""
-import cv2
-
-
-import matplotlib.pyplot as plt
-
-from PIL import Image
-from utils.boundary import *
-
-
-def draw_floorplan(xz, fill_color=None, border_color=None, side_l=512, show_radius=None, show=False, marker_color=None,
- center_color=None, scale=1.5):
- """
- :param scale:
- :param center_color:
- :param marker_color: for corners marking
- :param fill_color:
- :param border_color: boundary color
- :param xz: [[x1, z1], [x2, z2], ....]
- :param side_l: side length (pixel) of the output result
-    :param show_radius: maximum radius (in meters) to display, relative to the projection plane plan_y of xz.
-    For example, a value of 1 means a pixel distance of side_l/2 represents 1 m; if None, scale to show everything
- :param show:
- :return:
- """
- if fill_color is None:
- fill_color = [1]
-
-    board = np.zeros([side_l, side_l, len(fill_color)], dtype=float)  # np.float was removed in NumPy >= 1.24
-
- if show_radius is None:
- show_radius = np.linalg.norm(xz, axis=-1).max()
-
- xz = xz * side_l / (2*scale) / show_radius
- # v<-----------|o
- # | | |
- # | ----|----z |
- # | | |
- # | x \|/
- # |------------u
- xz[:, 1] = -xz[:, 1]
- xz += side_l // 2 # moving to center
-    xz = xz.astype(int)  # np.int was removed in NumPy >= 1.24
- cv2.fillPoly(board, [xz], fill_color)
- if border_color:
- cv2.drawContours(board, [xz], 0, border_color, 2)
-
- if marker_color is not None:
- for p in xz:
- cv2.drawMarker(board, tuple(p), marker_color, markerType=0, markerSize=10, thickness=2)
- if center_color is not None:
- cv2.drawMarker(board, tuple([side_l // 2, side_l // 2]), center_color, markerType=0, markerSize=10, thickness=2)
-
- if show:
- # plt.rcParams['figure.dpi'] = 300
- plt.axis('off')
- plt.imshow(board[..., 0] if board.shape[-1] == 1 else board)
- plt.show()
-
- return board
-
-
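-# Example (hypothetical corners of a 2 m x 2 m square room, following the
-# conventions documented above):
-#
-#     corners = np.array([[-1, -1], [1, -1], [1, 1], [-1, 1]], dtype=float)
-#     board = draw_floorplan(corners, fill_color=[0.8, 0.8, 0.8, 0.2],
-#                            border_color=[0, 0, 1, 1], show_radius=2, show=True)
-
-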
-def draw_iou_floorplan(dt_xz, gt_xz, show_radius=None, show=False, side_l=512,
- iou_2d=None, iou_3d=None, dt_board_color=None, gt_board_color=None):
- """
- :param gt_board_color:
- :param dt_board_color:
- :param dt_xz: [[x1, z1], [x2, z2], ....]
- :param gt_xz: [[x1, z1], [x2, z2], ....]
- :param show:
- :param side_l: side length (pixel) of the output result
-    :param show_radius: maximum radius (in meters) to display, relative to the projection plane plan_y of xz.
-    For example, a value of 1 means a pixel distance of side_l/2 represents 1 m; if None, scale to show everything
- :param iou_2d:
- :param iou_3d:
- :return:
- """
- if dt_board_color is None:
- dt_board_color = [0, 1, 0, 1]
- if gt_board_color is None:
- gt_board_color = [0, 0, 1, 1]
- center_color = [1, 0, 0, 1]
- fill_color = [0.2, 0.2, 0.2, 0.2]
-
- if show_radius is None:
-        # uniform scale
- gt_radius = np.linalg.norm(gt_xz, axis=-1).max()
- dt_radius = np.linalg.norm(dt_xz, axis=-1).max()
- show_radius = gt_radius if gt_radius > dt_radius else dt_radius
-
- dt_floorplan = draw_floorplan(dt_xz, show_radius=show_radius, fill_color=fill_color,
- border_color=dt_board_color, side_l=side_l, show=False)
- gt_floorplan = draw_floorplan(gt_xz, show_radius=show_radius, fill_color=fill_color,
- border_color=gt_board_color, side_l=side_l, show=False,
- center_color=[1, 0, 0, 1])
-
- dt_floorplan = Image.fromarray((dt_floorplan * 255).astype(np.uint8), mode='RGBA')
- gt_floorplan = Image.fromarray((gt_floorplan * 255).astype(np.uint8), mode='RGBA')
- iou_floorplan = Image.alpha_composite(gt_floorplan, dt_floorplan)
-
-    back = np.zeros([side_l, side_l, len(fill_color)], dtype=float)  # np.float was removed in NumPy >= 1.24
- back[..., :] = [0.8, 0.8, 0.8, 1]
- back = Image.fromarray((back * 255).astype(np.uint8), mode='RGBA')
-
- iou_floorplan = Image.alpha_composite(back, iou_floorplan).convert("RGB")
- iou_floorplan = np.array(iou_floorplan) / 255.0
-
- if iou_2d is not None:
- cv2.putText(iou_floorplan, f'2d:{iou_2d * 100:.2f}', (10, 30), 2, 1, (0, 0, 0), 1)
- if iou_3d is not None:
- cv2.putText(iou_floorplan, f'3d:{iou_3d * 100:.2f}', (10, 60), 2, 1, (0, 0, 0), 1)
-
- if show:
- plt.axis('off')
- plt.imshow(iou_floorplan)
- plt.show()
- return iou_floorplan
-
-
-if __name__ == '__main__':
- import numpy as np
- from dataset.mp3d_dataset import MP3DDataset
- from utils.boundary import depth2boundaries
- from utils.conversion import uv2xyz
- from visualization.boundary import draw_boundaries
-
- mp3d_dataset = MP3DDataset(root_dir='../src/dataset/mp3d', mode='train')
- gt = mp3d_dataset.__getitem__(0)
-
- # boundary_list = depth2boundaries(gt['ratio'], gt['depth'], step=None)
- # pano_img = draw_boundaries(gt['image'].transpose(1, 2, 0), boundary_list=boundary_list, show=True)
- # draw_floorplan(uv2xyz(boundary_list[0])[..., ::2], show=True, marker_color=None, center_color=0.8)
- # draw_floorplan(depth2xyz(gt['depth'])[..., ::2], show=True, marker_color=None, center_color=0.8)
-
- corners = gt['corners'][gt['corners'][..., 0] + gt['corners'][..., 1] != 0]
- dt_corners = corners + 0.1
- # img = draw_floorplan(uv2xyz(corners)[..., ::2], show=True, fill_color=[0.8, 0.8, 0.8, 0.2],
- # marker_color=None, center_color=[1, 0, 0, 1], border_color=[0, 0, 1, 1])
- # cv2.imwrite('../src/fig/flp.png', (img*255).astype(np.uint8))
-
- img = draw_iou_floorplan(uv2xyz(dt_corners)[..., ::2], uv2xyz(corners)[..., ::2], side_l=512, show=True)
- img[..., 0:3] = img[..., 0:3][..., ::-1]
- # cv2.imwrite('../src/fig/flp.png', (img*255).astype(np.uint8))
-
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/coaches/base_coach.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/coaches/base_coach.py
deleted file mode 100644
index 1d754dccd39786bcec2a6f08ad1e6fd845319bab..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/stylegan_human/pti/training/coaches/base_coach.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import abc
-import os
-import pickle
-from argparse import Namespace
-import wandb
-import os.path
-from .localitly_regulizer import Space_Regulizer, l2_loss
-import torch
-from torchvision import transforms
-from lpips import LPIPS
-from pti.training.projectors import w_projector
-from pti.pti_configs import global_config, paths_config, hyperparameters
-from pti.pti_models.e4e.psp import pSp
-from utils.log_utils import log_image_from_w
-from utils.models_utils import toogle_grad, load_old_G
-
-
-class BaseCoach:
- def __init__(self, data_loader, use_wandb):
-
- self.use_wandb = use_wandb
- self.data_loader = data_loader
- self.w_pivots = {}
- self.image_counter = 0
-
- if hyperparameters.first_inv_type == 'w+':
- self.initilize_e4e()
-
- self.e4e_image_transform = transforms.Compose([
- transforms.ToPILImage(),
- transforms.Resize((256, 128)),
- transforms.ToTensor(),
- transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
-
- # Initialize loss
- self.lpips_loss = LPIPS(net=hyperparameters.lpips_type).to(
- global_config.device).eval()
-
- self.restart_training()
-
- # Initialize checkpoint dir
- self.checkpoint_dir = paths_config.checkpoints_dir
- os.makedirs(self.checkpoint_dir, exist_ok=True)
-
- def restart_training(self):
-
- # Initialize networks
- self.G = load_old_G()
- toogle_grad(self.G, True)
-
- self.original_G = load_old_G()
-
- self.space_regulizer = Space_Regulizer(
- self.original_G, self.lpips_loss)
- self.optimizer = self.configure_optimizers()
-
- def get_inversion(self, w_path_dir, image_name, image):
- embedding_dir = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}'
- os.makedirs(embedding_dir, exist_ok=True)
-
- w_pivot = None
-
- if hyperparameters.use_last_w_pivots:
- w_pivot = self.load_inversions(w_path_dir, image_name)
-
- if not hyperparameters.use_last_w_pivots or w_pivot is None:
- w_pivot = self.calc_inversions(image, image_name)
- torch.save(w_pivot, f'{embedding_dir}/0.pt')
-
- w_pivot = w_pivot.to(global_config.device)
- return w_pivot
-
- def load_inversions(self, w_path_dir, image_name):
- if image_name in self.w_pivots:
- return self.w_pivots[image_name]
-
- if hyperparameters.first_inv_type == 'w+':
- w_potential_path = f'{w_path_dir}/{paths_config.e4e_results_keyword}/{image_name}/0.pt'
- else:
- w_potential_path = f'{w_path_dir}/{paths_config.pti_results_keyword}/{image_name}/0.pt'
- if not os.path.isfile(w_potential_path):
- return None
- w = torch.load(w_potential_path).to(global_config.device)
- self.w_pivots[image_name] = w
- return w
-
- def calc_inversions(self, image, image_name):
- if hyperparameters.first_inv_type == 'w+':
- w = self.get_e4e_inversion(image)
-
- else:
- id_image = torch.squeeze(
- (image.to(global_config.device) + 1) / 2) * 255
- w = w_projector.project(self.G, id_image, device=torch.device(global_config.device), w_avg_samples=600,
- num_steps=hyperparameters.first_inv_steps, w_name=image_name,
- use_wandb=self.use_wandb)
-
- return w
-
- @abc.abstractmethod
- def train(self):
- pass
-
- def configure_optimizers(self):
- optimizer = torch.optim.Adam(
- self.G.parameters(), lr=hyperparameters.pti_learning_rate)
-
- return optimizer
-
- def calc_loss(self, generated_images, real_images, log_name, new_G, use_ball_holder, w_batch):
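-        # Total loss (a summary of the terms accumulated below):
-        #   L = pt_l2_lambda * L2(G(w), x) + pt_lpips_lambda * LPIPS(G(w), x)
-        #       (+ locality regularization when use_ball_holder is set)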
-        loss = 0.0
-        # Initialize so the return below does not raise NameError when a term
-        # is disabled via its lambda.
-        l2_loss_val = torch.tensor(0.0)
-        loss_lpips = torch.tensor(0.0)
-
- if hyperparameters.pt_l2_lambda > 0:
- l2_loss_val = l2_loss(generated_images, real_images)
- if self.use_wandb:
- wandb.log({f'MSE_loss_val_{log_name}': l2_loss_val.detach(
- ).cpu()}, step=global_config.training_step)
- loss += l2_loss_val * hyperparameters.pt_l2_lambda
- if hyperparameters.pt_lpips_lambda > 0:
- loss_lpips = self.lpips_loss(generated_images, real_images)
- loss_lpips = torch.squeeze(loss_lpips)
- if self.use_wandb:
- wandb.log({f'LPIPS_loss_val_{log_name}': loss_lpips.detach(
- ).cpu()}, step=global_config.training_step)
- loss += loss_lpips * hyperparameters.pt_lpips_lambda
-
- if use_ball_holder and hyperparameters.use_locality_regularization:
- ball_holder_loss_val = self.space_regulizer.space_regulizer_loss(
- new_G, w_batch, use_wandb=self.use_wandb)
- loss += ball_holder_loss_val
-
- return loss, l2_loss_val, loss_lpips
-
- def forward(self, w):
- generated_images = self.G.synthesis(
- w, noise_mode='const', force_fp32=True)
-
- return generated_images
-
- def initilize_e4e(self):
- ckpt = torch.load(paths_config.e4e, map_location='cpu')
- opts = ckpt['opts']
- opts['batch_size'] = hyperparameters.train_batch_size
- opts['checkpoint_path'] = paths_config.e4e
- opts = Namespace(**opts)
- self.e4e_inversion_net = pSp(opts)
- self.e4e_inversion_net.eval()
- self.e4e_inversion_net = self.e4e_inversion_net.to(
- global_config.device)
- toogle_grad(self.e4e_inversion_net, False)
-
- def get_e4e_inversion(self, image):
- image = (image + 1) / 2
- new_image = self.e4e_image_transform(image[0]).to(global_config.device)
- _, w = self.e4e_inversion_net(new_image.unsqueeze(0), randomize_noise=False, return_latents=True, resize=False,
- input_code=False)
- if self.use_wandb:
- log_image_from_w(w, self.G, 'First e4e inversion')
- return w
diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/misc.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/misc.py
deleted file mode 100644
index 5470dcfc5e59e6bc4484ca3075cd09a708e43467..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan-Inversion/stylegan_human/torch_utils/misc.py
+++ /dev/null
@@ -1,294 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import re
-import contextlib
-import numpy as np
-import torch
-import warnings
-import dnnlib
-
-# ----------------------------------------------------------------------------
-# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
-# same constant is used multiple times.
-
-_constant_cache = dict()
-
-
-def constant(value, shape=None, dtype=None, device=None, memory_format=None):
- value = np.asarray(value)
- if shape is not None:
- shape = tuple(shape)
- if dtype is None:
- dtype = torch.get_default_dtype()
- if device is None:
- device = torch.device('cpu')
- if memory_format is None:
- memory_format = torch.contiguous_format
-
- key = (value.shape, value.dtype, value.tobytes(),
- shape, dtype, device, memory_format)
- tensor = _constant_cache.get(key, None)
- if tensor is None:
- tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
- if shape is not None:
- tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
- tensor = tensor.contiguous(memory_format=memory_format)
- _constant_cache[key] = tensor
- return tensor
-
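-# For example, repeated calls such as `constant([1.0, 0.0], device=device)`
-# return the same cached tensor instead of re-uploading it to the GPU.
-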
-# ----------------------------------------------------------------------------
-# Replace NaN/Inf with specified numerical values.
-
-
-try:
- nan_to_num = torch.nan_to_num # 1.8.0a0
-except AttributeError:
- def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
- assert isinstance(input, torch.Tensor)
- if posinf is None:
- posinf = torch.finfo(input.dtype).max
- if neginf is None:
- neginf = torch.finfo(input.dtype).min
- assert nan == 0
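-        # unsqueeze + nansum over the singleton dim maps NaN -> 0 (the only
-        # `nan` value this fallback supports); clamp then handles +/-inf.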
- return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
-
-# ----------------------------------------------------------------------------
-# Symbolic assert.
-
-try:
- symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
-except AttributeError:
- symbolic_assert = torch.Assert # 1.7.0
-
-# ----------------------------------------------------------------------------
-# Context manager to suppress known warnings in torch.jit.trace().
-
-
-class suppress_tracer_warnings(warnings.catch_warnings):
- def __enter__(self):
- super().__enter__()
- warnings.simplefilter('ignore', category=torch.jit.TracerWarning)
- return self
-
-# ----------------------------------------------------------------------------
-# Assert that the shape of a tensor matches the given list of integers.
-# None indicates that the size of a dimension is allowed to vary.
-# Performs symbolic assertion when used in torch.jit.trace().
-
-
-def assert_shape(tensor, ref_shape):
- if tensor.ndim != len(ref_shape):
- raise AssertionError(
- f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
- for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
- if ref_size is None:
- pass
- elif isinstance(ref_size, torch.Tensor):
- with suppress_tracer_warnings(): # as_tensor results are registered as constants
- symbolic_assert(torch.equal(torch.as_tensor(
- size), ref_size), f'Wrong size for dimension {idx}')
- elif isinstance(size, torch.Tensor):
- with suppress_tracer_warnings(): # as_tensor results are registered as constants
- symbolic_assert(torch.equal(size, torch.as_tensor(
- ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
- elif size != ref_size:
- raise AssertionError(
- f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
-
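-# For example (hypothetical tensor; `None` leaves a dimension unconstrained):
-#
-#     x = torch.zeros([8, 3, 256, 256])
-#     assert_shape(x, [None, 3, 256, 256])  # passes
-#     assert_shape(x, [8, 1, 256, 256])     # raises AssertionError
-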
-# ----------------------------------------------------------------------------
-# Function decorator that calls torch.autograd.profiler.record_function().
-
-
-def profiled_function(fn):
- def decorator(*args, **kwargs):
- with torch.autograd.profiler.record_function(fn.__name__):
- return fn(*args, **kwargs)
- decorator.__name__ = fn.__name__
- return decorator
-
-# ----------------------------------------------------------------------------
-# Sampler for torch.utils.data.DataLoader that loops over the dataset
-# indefinitely, shuffling items as it goes.
-
-
-class InfiniteSampler(torch.utils.data.Sampler):
- def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
- assert len(dataset) > 0
- assert num_replicas > 0
- assert 0 <= rank < num_replicas
- assert 0 <= window_size <= 1
- super().__init__(dataset)
- self.dataset = dataset
- self.rank = rank
- self.num_replicas = num_replicas
- self.shuffle = shuffle
- self.seed = seed
- self.window_size = window_size
-
- def __iter__(self):
- order = np.arange(len(self.dataset))
- rnd = None
- window = 0
- if self.shuffle:
- rnd = np.random.RandomState(self.seed)
- rnd.shuffle(order)
- window = int(np.rint(order.size * self.window_size))
-
- idx = 0
- while True:
- i = idx % order.size
- if idx % self.num_replicas == self.rank:
- yield order[i]
- if window >= 2:
- j = (i - rnd.randint(window)) % order.size
- order[i], order[j] = order[j], order[i]
- idx += 1
-
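-# Typical use (mirrors how the training loop consumes it; `dataset` is assumed
-# to be a map-style torch dataset):
-#
-#     sampler = InfiniteSampler(dataset, rank=rank, num_replicas=num_gpus, seed=0)
-#     loader = iter(torch.utils.data.DataLoader(dataset=dataset, sampler=sampler, batch_size=4))
-#     images, labels = next(loader)  # can be drawn from indefinitely
-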
-# ----------------------------------------------------------------------------
-# Utilities for operating with torch.nn.Module parameters and buffers.
-
-
-def params_and_buffers(module):
- assert isinstance(module, torch.nn.Module)
- return list(module.parameters()) + list(module.buffers())
-
-
-def named_params_and_buffers(module):
- assert isinstance(module, torch.nn.Module)
- return list(module.named_parameters()) + list(module.named_buffers())
-
-
-def copy_params_and_buffers(src_module, dst_module, require_all=False):
- assert isinstance(src_module, torch.nn.Module)
- assert isinstance(dst_module, torch.nn.Module)
- src_tensors = {name: tensor for name,
- tensor in named_params_and_buffers(src_module)}
- for name, tensor in named_params_and_buffers(dst_module):
- assert (name in src_tensors) or (not require_all)
- if name in src_tensors:
- tensor.copy_(src_tensors[name].detach()).requires_grad_(
- tensor.requires_grad)
-
-# ----------------------------------------------------------------------------
-# Context manager for easily enabling/disabling DistributedDataParallel
-# synchronization.
-
-
-@contextlib.contextmanager
-def ddp_sync(module, sync):
- assert isinstance(module, torch.nn.Module)
- if sync or not isinstance(module, torch.nn.parallel.DistributedDataParallel):
- yield
- else:
- with module.no_sync():
- yield
-
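-# Sketch of the intended pattern (assuming `module` may or may not be wrapped
-# in DistributedDataParallel; gradients are only all-reduced when sync=True):
-#
-#     with ddp_sync(module, sync=is_last_accumulation_round):
-#         loss.backward()
-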
-# ----------------------------------------------------------------------------
-# Check DistributedDataParallel consistency across processes.
-
-
-def check_ddp_consistency(module, ignore_regex=None):
- assert isinstance(module, torch.nn.Module)
- for name, tensor in named_params_and_buffers(module):
- fullname = type(module).__name__ + '.' + name
- if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
- continue
- tensor = tensor.detach()
- other = tensor.clone()
- torch.distributed.broadcast(tensor=other, src=0)
- assert (nan_to_num(tensor) == nan_to_num(other)).all(), fullname
-
-# ----------------------------------------------------------------------------
-# Print summary table of module hierarchy.
-
-
-def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
- assert isinstance(module, torch.nn.Module)
- assert not isinstance(module, torch.jit.ScriptModule)
- assert isinstance(inputs, (tuple, list))
-
- # Register hooks.
- entries = []
- nesting = [0]
-
- def pre_hook(_mod, _inputs):
- nesting[0] += 1
-
- def post_hook(mod, _inputs, outputs):
- nesting[0] -= 1
- if nesting[0] <= max_nesting:
- outputs = list(outputs) if isinstance(
- outputs, (tuple, list)) else [outputs]
- outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
- entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
- hooks = [mod.register_forward_pre_hook(
- pre_hook) for mod in module.modules()]
- hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
-
- # Run module.
- outputs = module(*inputs)
- for hook in hooks:
- hook.remove()
-
- # Identify unique outputs, parameters, and buffers.
- tensors_seen = set()
- for e in entries:
- e.unique_params = [
- t for t in e.mod.parameters() if id(t) not in tensors_seen]
- e.unique_buffers = [
- t for t in e.mod.buffers() if id(t) not in tensors_seen]
- e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
- tensors_seen |= {id(t) for t in e.unique_params +
- e.unique_buffers + e.unique_outputs}
-
- # Filter out redundant entries.
- if skip_redundant:
- entries = [e for e in entries if len(e.unique_params) or len(
- e.unique_buffers) or len(e.unique_outputs)]
-
- # Construct table.
- rows = [[type(module).__name__, 'Parameters',
- 'Buffers', 'Output shape', 'Datatype']]
- rows += [['---'] * len(rows[0])]
- param_total = 0
- buffer_total = 0
- submodule_names = {mod: name for name, mod in module.named_modules()}
- for e in entries:
- name = '' if e.mod is module else submodule_names[e.mod]
- param_size = sum(t.numel() for t in e.unique_params)
- buffer_size = sum(t.numel() for t in e.unique_buffers)
-        output_shapes = [str(list(t.shape)) for t in e.outputs]
- output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
- rows += [[
- name + (':0' if len(e.outputs) >= 2 else ''),
- str(param_size) if param_size else '-',
- str(buffer_size) if buffer_size else '-',
- (output_shapes + ['-'])[0],
- (output_dtypes + ['-'])[0],
- ]]
- for idx in range(1, len(e.outputs)):
- rows += [[name + f':{idx}', '-', '-',
- output_shapes[idx], output_dtypes[idx]]]
- param_total += param_size
- buffer_total += buffer_size
- rows += [['---'] * len(rows[0])]
- rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
-
- # Print table.
- widths = [max(len(cell) for cell in column) for column in zip(*rows)]
- print()
- for row in rows:
- print(' '.join(cell + ' ' * (width - len(cell))
- for cell, width in zip(row, widths)))
- print()
- return outputs
-
-# ----------------------------------------------------------------------------
diff --git a/spaces/DragGan/DragGan/training/training_loop.py b/spaces/DragGan/DragGan/training/training_loop.py
deleted file mode 100644
index ddd0c15e226b0436048fee4469341e3fb653c71b..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/training/training_loop.py
+++ /dev/null
@@ -1,427 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Main training loop."""
-
-import os
-import time
-import copy
-import json
-import pickle
-import psutil
-import PIL.Image
-import numpy as np
-import torch
-import dnnlib
-from torch_utils import misc
-from torch_utils import training_stats
-from torch_utils.ops import conv2d_gradfix
-from torch_utils.ops import grid_sample_gradfix
-
-import legacy
-from metrics import metric_main
-
-#----------------------------------------------------------------------------
-
-def setup_snapshot_image_grid(training_set, random_seed=0):
- rnd = np.random.RandomState(random_seed)
- gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
- gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
-
- # No labels => show random subset of training samples.
- if not training_set.has_labels:
- all_indices = list(range(len(training_set)))
- rnd.shuffle(all_indices)
- grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
-
- else:
- # Group training samples by label.
- label_groups = dict() # label => [idx, ...]
- for idx in range(len(training_set)):
- label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
- if label not in label_groups:
- label_groups[label] = []
- label_groups[label].append(idx)
-
- # Reorder.
- label_order = sorted(label_groups.keys())
- for label in label_order:
- rnd.shuffle(label_groups[label])
-
- # Organize into grid.
- grid_indices = []
- for y in range(gh):
- label = label_order[y % len(label_order)]
- indices = label_groups[label]
- grid_indices += [indices[x % len(indices)] for x in range(gw)]
- label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]
-
- # Load data.
- images, labels = zip(*[training_set[i] for i in grid_indices])
- return (gw, gh), np.stack(images), np.stack(labels)
-
-#----------------------------------------------------------------------------
-
-def save_image_grid(img, fname, drange, grid_size):
- lo, hi = drange
- img = np.asarray(img, dtype=np.float32)
- img = (img - lo) * (255 / (hi - lo))
- img = np.rint(img).clip(0, 255).astype(np.uint8)
-
- gw, gh = grid_size
- _N, C, H, W = img.shape
- img = img.reshape([gh, gw, C, H, W])
- img = img.transpose(0, 3, 1, 4, 2)
- img = img.reshape([gh * H, gw * W, C])
-
- assert C in [1, 3]
- if C == 1:
- PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)
- if C == 3:
- PIL.Image.fromarray(img, 'RGB').save(fname)
-
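-# Example (hypothetical batch of 8 RGB images in [-1, 1], laid out on a 4x2 grid):
-#
-#     imgs = np.random.uniform(-1, 1, size=[8, 3, 64, 64]).astype(np.float32)
-#     save_image_grid(imgs, 'grid.png', drange=[-1, 1], grid_size=(4, 2))
-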
-#----------------------------------------------------------------------------
-
-def training_loop(
- run_dir = '.', # Output directory.
- training_set_kwargs = {}, # Options for training set.
- data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
- G_kwargs = {}, # Options for generator network.
- D_kwargs = {}, # Options for discriminator network.
- G_opt_kwargs = {}, # Options for generator optimizer.
- D_opt_kwargs = {}, # Options for discriminator optimizer.
- augment_kwargs = None, # Options for augmentation pipeline. None = disable.
- loss_kwargs = {}, # Options for loss function.
- metrics = [], # Metrics to evaluate during training.
- random_seed = 0, # Global random seed.
- num_gpus = 1, # Number of GPUs participating in the training.
- rank = 0, # Rank of the current process in [0, num_gpus[.
- batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
- batch_gpu = 4, # Number of samples processed at a time by one GPU.
- ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
- ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup.
- G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization.
- D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.
- augment_p = 0, # Initial value of augmentation probability.
- ada_target = None, # ADA target value. None = fixed p.
- ada_interval = 4, # How often to perform ADA adjustment?
- ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
- total_kimg = 25000, # Total length of the training, measured in thousands of real images.
- kimg_per_tick = 4, # Progress snapshot interval.
- image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
- network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
- resume_pkl = None, # Network pickle to resume training from.
- resume_kimg = 0, # First kimg to report when resuming training.
- cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
- abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
- progress_fn = None, # Callback function for updating training progress. Called for all ranks.
-):
- # Initialize.
- start_time = time.time()
- device = torch.device('cuda', rank)
- np.random.seed(random_seed * num_gpus + rank)
- torch.manual_seed(random_seed * num_gpus + rank)
- torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
- torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy.
- torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy.
- conv2d_gradfix.enabled = True # Improves training speed.
- grid_sample_gradfix.enabled = True # Avoids errors with the augmentation pipe.
-
- # Load training set.
- if rank == 0:
- print('Loading training set...')
- training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
- training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
- training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
- if rank == 0:
- print()
- print('Num images: ', len(training_set))
- print('Image shape:', training_set.image_shape)
- print('Label shape:', training_set.label_shape)
- print()
-
- # Construct networks.
- if rank == 0:
- print('Constructing networks...')
- common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels)
- G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
- D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
- G_ema = copy.deepcopy(G).eval()
-
- # Resume from existing pickle.
- if (resume_pkl is not None) and (rank == 0):
- print(f'Resuming from "{resume_pkl}"')
- with dnnlib.util.open_url(resume_pkl) as f:
- resume_data = legacy.load_network_pkl(f)
- for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
- misc.copy_params_and_buffers(resume_data[name], module, require_all=False)
-
- # Print network summary tables.
- if rank == 0:
- z = torch.empty([batch_gpu, G.z_dim], device=device)
- c = torch.empty([batch_gpu, G.c_dim], device=device)
- img = misc.print_module_summary(G, [z, c])
- misc.print_module_summary(D, [img, c])
-
- # Setup augmentation.
- if rank == 0:
- print('Setting up augmentation...')
- augment_pipe = None
- ada_stats = None
- if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
- augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
- augment_pipe.p.copy_(torch.as_tensor(augment_p))
- if ada_target is not None:
- ada_stats = training_stats.Collector(regex='Loss/signs/real')
-
- # Distribute across GPUs.
- if rank == 0:
- print(f'Distributing across {num_gpus} GPUs...')
- for module in [G, D, G_ema, augment_pipe]:
- if module is not None and num_gpus > 1:
- for param in misc.params_and_buffers(module):
- torch.distributed.broadcast(param, src=0)
-
- # Setup training phases.
- if rank == 0:
- print('Setting up training phases...')
- loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs) # subclass of training.loss.Loss
- phases = []
- for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
- if reg_interval is None:
- opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
- phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
- else: # Lazy regularization.
- mb_ratio = reg_interval / (reg_interval + 1)
- opt_kwargs = dnnlib.EasyDict(opt_kwargs)
- opt_kwargs.lr = opt_kwargs.lr * mb_ratio
- opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
- opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
- phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
- phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
- for phase in phases:
- phase.start_event = None
- phase.end_event = None
- if rank == 0:
- phase.start_event = torch.cuda.Event(enable_timing=True)
- phase.end_event = torch.cuda.Event(enable_timing=True)
-
- # Export sample images.
- grid_size = None
- grid_z = None
- grid_c = None
- if rank == 0:
- print('Exporting sample images...')
- grid_size, images, labels = setup_snapshot_image_grid(training_set=training_set)
- save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
- grid_z = torch.randn([labels.shape[0], G.z_dim], device=device).split(batch_gpu)
- grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
- images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
- save_image_grid(images, os.path.join(run_dir, 'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
-
- # Initialize logs.
- if rank == 0:
- print('Initializing logs...')
- stats_collector = training_stats.Collector(regex='.*')
- stats_metrics = dict()
- stats_jsonl = None
- stats_tfevents = None
- if rank == 0:
- stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
- try:
- import torch.utils.tensorboard as tensorboard
- stats_tfevents = tensorboard.SummaryWriter(run_dir)
- except ImportError as err:
- print('Skipping tfevents export:', err)
-
- # Train.
- if rank == 0:
- print(f'Training for {total_kimg} kimg...')
- print()
- cur_nimg = resume_kimg * 1000
- cur_tick = 0
- tick_start_nimg = cur_nimg
- tick_start_time = time.time()
- maintenance_time = tick_start_time - start_time
- batch_idx = 0
- if progress_fn is not None:
- progress_fn(0, total_kimg)
- while True:
-
- # Fetch training data.
- with torch.autograd.profiler.record_function('data_fetch'):
- phase_real_img, phase_real_c = next(training_set_iterator)
- phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
- phase_real_c = phase_real_c.to(device).split(batch_gpu)
- all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
- all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
- all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
- all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
- all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
-
- # Execute training phases.
- for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
- if batch_idx % phase.interval != 0:
- continue
- if phase.start_event is not None:
- phase.start_event.record(torch.cuda.current_stream(device))
-
- # Accumulate gradients.
- phase.opt.zero_grad(set_to_none=True)
- phase.module.requires_grad_(True)
- for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c):
- loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)
- phase.module.requires_grad_(False)
-
- # Update weights.
- with torch.autograd.profiler.record_function(phase.name + '_opt'):
- params = [param for param in phase.module.parameters() if param.grad is not None]
- if len(params) > 0:
- flat = torch.cat([param.grad.flatten() for param in params])
- if num_gpus > 1:
- torch.distributed.all_reduce(flat)
- flat /= num_gpus
- misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)
- grads = flat.split([param.numel() for param in params])
- for param, grad in zip(params, grads):
- param.grad = grad.reshape(param.shape)
- phase.opt.step()
-
- # Phase done.
- if phase.end_event is not None:
- phase.end_event.record(torch.cuda.current_stream(device))
-
- # Update G_ema.
- with torch.autograd.profiler.record_function('Gema'):
- ema_nimg = ema_kimg * 1000
- if ema_rampup is not None:
- ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
- ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
- for p_ema, p in zip(G_ema.parameters(), G.parameters()):
- p_ema.copy_(p.lerp(p_ema, ema_beta))
- for b_ema, b in zip(G_ema.buffers(), G.buffers()):
- b_ema.copy_(b)
-
- # Update state.
- cur_nimg += batch_size
- batch_idx += 1
-
- # Execute ADA heuristic.
- if (ada_stats is not None) and (batch_idx % ada_interval == 0):
- ada_stats.update()
- adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
- augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
-
- # Perform maintenance tasks once per tick.
- done = (cur_nimg >= total_kimg * 1000)
- if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
- continue
-
- # Print status line, accumulating the same information in training_stats.
- tick_end_time = time.time()
- fields = []
- fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
- fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
- fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
- fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
- fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
- fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
- fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
- fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
- fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
- torch.cuda.reset_peak_memory_stats()
- fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
- training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
- training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
- if rank == 0:
- print(' '.join(fields))
-
- # Check for abort.
- if (not done) and (abort_fn is not None) and abort_fn():
- done = True
- if rank == 0:
- print()
- print('Aborting...')
-
- # Save image snapshot.
- if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
- images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu() for z, c in zip(grid_z, grid_c)]).numpy()
- save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
-
- # Save network snapshot.
- snapshot_pkl = None
- snapshot_data = None
- if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
- snapshot_data = dict(G=G, D=D, G_ema=G_ema, augment_pipe=augment_pipe, training_set_kwargs=dict(training_set_kwargs))
- for key, value in snapshot_data.items():
- if isinstance(value, torch.nn.Module):
- value = copy.deepcopy(value).eval().requires_grad_(False)
- if num_gpus > 1:
- misc.check_ddp_consistency(value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
- for param in misc.params_and_buffers(value):
- torch.distributed.broadcast(param, src=0)
- snapshot_data[key] = value.cpu()
- del value # conserve memory
- snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
- if rank == 0:
- with open(snapshot_pkl, 'wb') as f:
- pickle.dump(snapshot_data, f)
-
- # Evaluate metrics.
- if (snapshot_data is not None) and (len(metrics) > 0):
- if rank == 0:
- print('Evaluating metrics...')
- for metric in metrics:
- result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
- dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
- if rank == 0:
- metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
- stats_metrics.update(result_dict.results)
- del snapshot_data # conserve memory
-
- # Collect statistics.
- for phase in phases:
- value = []
- if (phase.start_event is not None) and (phase.end_event is not None):
- phase.end_event.synchronize()
- value = phase.start_event.elapsed_time(phase.end_event)
- training_stats.report0('Timing/' + phase.name, value)
- stats_collector.update()
- stats_dict = stats_collector.as_dict()
-
- # Update logs.
- timestamp = time.time()
- if stats_jsonl is not None:
- fields = dict(stats_dict, timestamp=timestamp)
- stats_jsonl.write(json.dumps(fields) + '\n')
- stats_jsonl.flush()
- if stats_tfevents is not None:
- global_step = int(cur_nimg / 1e3)
- walltime = timestamp - start_time
- for name, value in stats_dict.items():
- stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
- for name, value in stats_metrics.items():
- stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
- stats_tfevents.flush()
- if progress_fn is not None:
- progress_fn(cur_nimg // 1000, total_kimg)
-
- # Update state.
- cur_tick += 1
- tick_start_nimg = cur_nimg
- tick_start_time = time.time()
- maintenance_time = tick_start_time - tick_end_time
- if done:
- break
-
- # Done.
- if rank == 0:
- print()
- print('Exiting...')
-
-#----------------------------------------------------------------------------
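Two numerical details in the deleted training loop above are easy to miss: lazy regularization rescales the Adam hyperparameters by `mb_ratio = reg_interval / (reg_interval + 1)`, and the G_ema decay is parameterized as a half-life measured in thousands of images. A minimal standalone sketch of both computations follows; the helper names are illustrative, not part of the codebase, and the formulas simply mirror the loop above.

```python
def lazy_reg_opt_kwargs(lr: float, betas, reg_interval: int) -> dict:
    """Rescale Adam hyperparameters when the regularizer only runs every
    `reg_interval` minibatches (the `mb_ratio` adjustment above)."""
    mb_ratio = reg_interval / (reg_interval + 1)
    return dict(lr=lr * mb_ratio, betas=[beta ** mb_ratio for beta in betas])

def ema_beta(batch_size: int, ema_kimg: float, cur_nimg: int, ema_rampup=None) -> float:
    """Per-step decay for G_ema: a half-life of `ema_kimg` thousand images,
    optionally ramped up early in training."""
    ema_nimg = ema_kimg * 1000
    if ema_rampup is not None:
        ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
    return 0.5 ** (batch_size / max(ema_nimg, 1e-8))

# Example: G regularized every 4 steps (the typical path-length penalty interval).
print(lazy_reg_opt_kwargs(lr=0.0025, betas=[0.0, 0.99], reg_interval=4))
print(ema_beta(batch_size=32, ema_kimg=10, cur_nimg=50_000, ema_rampup=0.05))
```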
diff --git a/spaces/Ekohai/bingAI/Dockerfile b/spaces/Ekohai/bingAI/Dockerfile
deleted file mode 100644
index b2a99b76d77e0d83f1e9f64b39232d9cac271d5a..0000000000000000000000000000000000000000
--- a/spaces/Ekohai/bingAI/Dockerfile
+++ /dev/null
@@ -1,34 +0,0 @@
-# Build Stage
-# Use golang:alpine as the base image for the build stage
-FROM golang:alpine AS builder
-
-# Add git so the project can be cloned from GitHub later
-RUN apk --no-cache add git
-
-# Clone the go-proxy-bingai project from GitHub into /workspace/app
-RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
-# Set the working directory to the cloned project directory
-WORKDIR /workspace/app
-
-# Build the Go project. -ldflags="-s -w" shrinks the compiled binary
-RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
-# Runtime Stage
-# Use the lightweight alpine image as the runtime base image
-FROM alpine
-
-# Set the working directory
-WORKDIR /workspace/app
-
-# Copy the compiled binary from the build stage into the runtime image
-COPY --from=builder /workspace/app/go-proxy-bingai .
-
-# Set an environment variable; the value here is a random string
-ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs6hD72ncMzLaoQWYtX5rG5bE3fZ4PO"
-
-# Expose port 8080
-EXPOSE 8080
-
-# Command to run when the container starts
-CMD ["/workspace/app/go-proxy-bingai"]
\ No newline at end of file
diff --git a/spaces/EmilyBrat/bratty-space-needs-correction/greeting.md b/spaces/EmilyBrat/bratty-space-needs-correction/greeting.md
deleted file mode 100644
index 9ac9999bb6a38e743cc56d1e5abc41aa085b1c7e..0000000000000000000000000000000000000000
--- a/spaces/EmilyBrat/bratty-space-needs-correction/greeting.md
+++ /dev/null
@@ -1 +0,0 @@
-cunny?
\ No newline at end of file
diff --git a/spaces/Farazquraishi/pendora/README.md b/spaces/Farazquraishi/pendora/README.md
deleted file mode 100644
index 1e83c804adaffb8451ffcbdd768620fd5a913ab8..0000000000000000000000000000000000000000
--- a/spaces/Farazquraishi/pendora/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: Face Swap
-emoji: 🧙🧙🧙🧙🧙🧙🧙🧙
-colorFrom: purple
-colorTo: green
-sdk: gradio
-app_file: app.py
-pinned: false
-license: cc-by-nc-sa-4.0
-duplicated_from: yotamsapi/face-swap
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`models`: _List[string]_
-HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`datasets`: _List[string]_
-HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/utils/face_restoration_helper.py b/spaces/FelixLuoX/codeformer/CodeFormer/facelib/utils/face_restoration_helper.py
deleted file mode 100644
index 5d3fb8f3b95ed9959610e64f6d7373ea8a56ece8..0000000000000000000000000000000000000000
--- a/spaces/FelixLuoX/codeformer/CodeFormer/facelib/utils/face_restoration_helper.py
+++ /dev/null
@@ -1,460 +0,0 @@
-import cv2
-import numpy as np
-import os
-import torch
-from torchvision.transforms.functional import normalize
-
-from facelib.detection import init_detection_model
-from facelib.parsing import init_parsing_model
-from facelib.utils.misc import img2tensor, imwrite, is_gray, bgr2gray
-
-
-def get_largest_face(det_faces, h, w):
-
- def get_location(val, length):
- if val < 0:
- return 0
- elif val > length:
- return length
- else:
- return val
-
- face_areas = []
- for det_face in det_faces:
- left = get_location(det_face[0], w)
- right = get_location(det_face[2], w)
- top = get_location(det_face[1], h)
- bottom = get_location(det_face[3], h)
- face_area = (right - left) * (bottom - top)
- face_areas.append(face_area)
- largest_idx = face_areas.index(max(face_areas))
- return det_faces[largest_idx], largest_idx
-
-
-def get_center_face(det_faces, h=0, w=0, center=None):
- if center is not None:
- center = np.array(center)
- else:
- center = np.array([w / 2, h / 2])
- center_dist = []
- for det_face in det_faces:
- face_center = np.array([(det_face[0] + det_face[2]) / 2, (det_face[1] + det_face[3]) / 2])
- dist = np.linalg.norm(face_center - center)
- center_dist.append(dist)
- center_idx = center_dist.index(min(center_dist))
- return det_faces[center_idx], center_idx
-
-
-class FaceRestoreHelper(object):
- """Helper for the face restoration pipeline (base class)."""
-
- def __init__(self,
- upscale_factor,
- face_size=512,
- crop_ratio=(1, 1),
- det_model='retinaface_resnet50',
- save_ext='png',
- template_3points=False,
- pad_blur=False,
- use_parse=False,
- device=None):
- self.template_3points = template_3points # improve robustness
- self.upscale_factor = int(upscale_factor)
- # the cropped face ratio based on the square face
- self.crop_ratio = crop_ratio # (h, w)
- assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ratio only supports >=1'
- self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))
-
- if self.template_3points:
- self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
- else:
- # standard 5 landmarks for FFHQ faces with 512 x 512
- # facexlib
- self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
- [201.26117, 371.41043], [313.08905, 371.15118]])
-
- # dlib: left_eye: 36:41 right_eye: 42:47 nose: 30,32,33,34 left mouth corner: 48 right mouth corner: 54
- # self.face_template = np.array([[193.65928, 242.98541], [318.32558, 243.06108], [255.67984, 328.82894],
- # [198.22603, 372.82502], [313.91018, 372.75659]])
-
-
- self.face_template = self.face_template * (face_size / 512.0)
- if self.crop_ratio[0] > 1:
- self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
- if self.crop_ratio[1] > 1:
- self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
- self.save_ext = save_ext
- self.pad_blur = pad_blur
- if self.pad_blur is True:
- self.template_3points = False
-
- self.all_landmarks_5 = []
- self.det_faces = []
- self.affine_matrices = []
- self.inverse_affine_matrices = []
- self.cropped_faces = []
- self.restored_faces = []
- self.pad_input_imgs = []
-
- if device is None:
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- else:
- self.device = device
-
- # init face detection model
- self.face_det = init_detection_model(det_model, half=False, device=self.device)
-
- # init face parsing model
- self.use_parse = use_parse
- self.face_parse = init_parsing_model(model_name='parsenet', device=self.device)
-
- def set_upscale_factor(self, upscale_factor):
- self.upscale_factor = upscale_factor
-
- def read_image(self, img):
- """img can be image path or cv2 loaded image."""
- # self.input_img is Numpy array, (h, w, c), BGR, uint8, [0, 255]
- if isinstance(img, str):
- img = cv2.imread(img)
-
- if np.max(img) > 256: # 16-bit image
- img = img / 65535 * 255
- if len(img.shape) == 2: # gray image
- img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
- elif img.shape[2] == 4: # BGRA image with alpha channel
- img = img[:, :, 0:3]
-
- self.input_img = img
- self.is_gray = is_gray(img, threshold=5)
- if self.is_gray:
- print('Grayscale input: True')
-
- if min(self.input_img.shape[:2])<512:
- f = 512.0/min(self.input_img.shape[:2])
- self.input_img = cv2.resize(self.input_img, (0,0), fx=f, fy=f, interpolation=cv2.INTER_LINEAR)
-
- def get_face_landmarks_5(self,
- only_keep_largest=False,
- only_center_face=False,
- resize=None,
- blur_ratio=0.01,
- eye_dist_threshold=None):
- if resize is None:
- scale = 1
- input_img = self.input_img
- else:
- h, w = self.input_img.shape[0:2]
- scale = resize / min(h, w)
- scale = max(1, scale) # always scale up
- h, w = int(h * scale), int(w * scale)
- interp = cv2.INTER_AREA if scale < 1 else cv2.INTER_LINEAR
- input_img = cv2.resize(self.input_img, (w, h), interpolation=interp)
-
- with torch.no_grad():
- bboxes = self.face_det.detect_faces(input_img)
-
- if bboxes is None or bboxes.shape[0] == 0:
- return 0
- else:
- bboxes = bboxes / scale
-
- for bbox in bboxes:
- # remove faces with too small eye distance: side faces or too small faces
- eye_dist = np.linalg.norm([bbox[6] - bbox[8], bbox[7] - bbox[9]])
- if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold):
- continue
-
- if self.template_3points:
- landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)])
- else:
- landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)])
- self.all_landmarks_5.append(landmark)
- self.det_faces.append(bbox[0:5])
-
- if len(self.det_faces) == 0:
- return 0
- if only_keep_largest:
- h, w, _ = self.input_img.shape
- self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w)
- self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]]
- elif only_center_face:
- h, w, _ = self.input_img.shape
- self.det_faces, center_idx = get_center_face(self.det_faces, h, w)
- self.all_landmarks_5 = [self.all_landmarks_5[center_idx]]
-
- # pad blurry images
- if self.pad_blur:
- self.pad_input_imgs = []
- for landmarks in self.all_landmarks_5:
- # get landmarks
- eye_left = landmarks[0, :]
- eye_right = landmarks[1, :]
- eye_avg = (eye_left + eye_right) * 0.5
- mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5
- eye_to_eye = eye_right - eye_left
- eye_to_mouth = mouth_avg - eye_avg
-
- # Get the oriented crop rectangle
- # x: half width of the oriented crop rectangle
- x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
- # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
- # norm with the hypotenuse: get the direction
- x /= np.hypot(*x) # divide by the hypotenuse to get a unit direction vector
- rect_scale = 1.5
- x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
- # y: half height of the oriented crop rectangle
- y = np.flipud(x) * [-1, 1]
-
- # c: center
- c = eye_avg + eye_to_mouth * 0.1
- # quad: (left_top, left_bottom, right_bottom, right_top)
- quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
- # qsize: side length of the square
- qsize = np.hypot(*x) * 2
- border = max(int(np.rint(qsize * 0.1)), 3)
-
- # get pad
- # pad: (width_left, height_top, width_right, height_bottom)
- pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- pad = [
- max(-pad[0] + border, 1),
- max(-pad[1] + border, 1),
- max(pad[2] - self.input_img.shape[0] + border, 1),
- max(pad[3] - self.input_img.shape[1] + border, 1)
- ]
-
- if max(pad) > 1:
- # pad image
- pad_img = np.pad(self.input_img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
- # modify landmark coords
- landmarks[:, 0] += pad[0]
- landmarks[:, 1] += pad[1]
- # blur pad images
- h, w, _ = pad_img.shape
- y, x, _ = np.ogrid[:h, :w, :1]
- mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
- np.float32(w - 1 - x) / pad[2]),
- 1.0 - np.minimum(np.float32(y) / pad[1],
- np.float32(h - 1 - y) / pad[3]))
- blur = int(qsize * blur_ratio)
- if blur % 2 == 0:
- blur += 1
- blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur))
- # blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0)
-
- pad_img = pad_img.astype('float32')
- pad_img += (blur_img - pad_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
- pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip(mask, 0.0, 1.0)
- pad_img = np.clip(pad_img, 0, 255) # float32, [0, 255]
- self.pad_input_imgs.append(pad_img)
- else:
- self.pad_input_imgs.append(np.copy(self.input_img))
-
- return len(self.all_landmarks_5)
-
- def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
- """Align and warp faces with face template.
- """
- if self.pad_blur:
- assert len(self.pad_input_imgs) == len(
- self.all_landmarks_5), f'Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
- for idx, landmark in enumerate(self.all_landmarks_5):
- # use 5 landmarks to get affine matrix
- # use cv2.LMEDS method for the equivalence to skimage transform
- # ref: https://blog.csdn.net/yichxi/article/details/115827338
- affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
- self.affine_matrices.append(affine_matrix)
- # warp and crop faces
- if border_mode == 'constant':
- border_mode = cv2.BORDER_CONSTANT
- elif border_mode == 'reflect101':
- border_mode = cv2.BORDER_REFLECT101
- elif border_mode == 'reflect':
- border_mode = cv2.BORDER_REFLECT
- if self.pad_blur:
- input_img = self.pad_input_imgs[idx]
- else:
- input_img = self.input_img
- cropped_face = cv2.warpAffine(
- input_img, affine_matrix, self.face_size, borderMode=border_mode, borderValue=(135, 133, 132)) # gray
- self.cropped_faces.append(cropped_face)
- # save the cropped face
- if save_cropped_path is not None:
- path = os.path.splitext(save_cropped_path)[0]
- save_path = f'{path}_{idx:02d}.{self.save_ext}'
- imwrite(cropped_face, save_path)
-
- def get_inverse_affine(self, save_inverse_affine_path=None):
- """Get inverse affine matrix."""
- for idx, affine_matrix in enumerate(self.affine_matrices):
- inverse_affine = cv2.invertAffineTransform(affine_matrix)
- inverse_affine *= self.upscale_factor
- self.inverse_affine_matrices.append(inverse_affine)
- # save inverse affine matrices
- if save_inverse_affine_path is not None:
- path, _ = os.path.splitext(save_inverse_affine_path)
- save_path = f'{path}_{idx:02d}.pth'
- torch.save(inverse_affine, save_path)
-
-
- def add_restored_face(self, face):
- if self.is_gray:
- face = bgr2gray(face) # convert img into grayscale
- self.restored_faces.append(face)
-
-
- def paste_faces_to_input_image(self, save_path=None, upsample_img=None, draw_box=False, face_upsampler=None):
- h, w, _ = self.input_img.shape
- h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor)
-
- if upsample_img is None:
- # simply resize the background
- # upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
- upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LINEAR)
- else:
- upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
-
- assert len(self.restored_faces) == len(
- self.inverse_affine_matrices), ('lengths of restored_faces and affine_matrices differ.')
-
- inv_mask_borders = []
- for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
- if face_upsampler is not None:
- restored_face = face_upsampler.enhance(restored_face, outscale=self.upscale_factor)[0]
- inverse_affine /= self.upscale_factor
- inverse_affine[:, 2] *= self.upscale_factor
- face_size = (self.face_size[0]*self.upscale_factor, self.face_size[1]*self.upscale_factor)
- else:
- # Add an offset to inverse affine matrix, for more precise back alignment
- if self.upscale_factor > 1:
- extra_offset = 0.5 * self.upscale_factor
- else:
- extra_offset = 0
- inverse_affine[:, 2] += extra_offset
- face_size = self.face_size
- inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up))
-
- # if draw_box or not self.use_parse: # use square parse maps
- # mask = np.ones(face_size, dtype=np.float32)
- # inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
- # # remove the black borders
- # inv_mask_erosion = cv2.erode(
- # inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
- # pasted_face = inv_mask_erosion[:, :, None] * inv_restored
- # total_face_area = np.sum(inv_mask_erosion) # // 3
- # # add border
- # if draw_box:
- # h, w = face_size
- # mask_border = np.ones((h, w, 3), dtype=np.float32)
- # border = int(1400/np.sqrt(total_face_area))
- # mask_border[border:h-border, border:w-border,:] = 0
- # inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
- # inv_mask_borders.append(inv_mask_border)
- # if not self.use_parse:
- # # compute the fusion edge based on the area of face
- # w_edge = int(total_face_area**0.5) // 20
- # erosion_radius = w_edge * 2
- # inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
- # blur_size = w_edge * 2
- # inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
- # if len(upsample_img.shape) == 2: # upsample_img is gray image
- # upsample_img = upsample_img[:, :, None]
- # inv_soft_mask = inv_soft_mask[:, :, None]
-
- # always use square mask
- mask = np.ones(face_size, dtype=np.float32)
- inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
- # remove the black borders
- inv_mask_erosion = cv2.erode(
- inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
- pasted_face = inv_mask_erosion[:, :, None] * inv_restored
- total_face_area = np.sum(inv_mask_erosion) # // 3
- # add border
- if draw_box:
- h, w = face_size
- mask_border = np.ones((h, w, 3), dtype=np.float32)
- border = int(1400/np.sqrt(total_face_area))
- mask_border[border:h-border, border:w-border,:] = 0
- inv_mask_border = cv2.warpAffine(mask_border, inverse_affine, (w_up, h_up))
- inv_mask_borders.append(inv_mask_border)
- # compute the fusion edge based on the area of face
- w_edge = int(total_face_area**0.5) // 20
- erosion_radius = w_edge * 2
- inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
- blur_size = w_edge * 2
- inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
- if len(upsample_img.shape) == 2: # upsample_img is gray image
- upsample_img = upsample_img[:, :, None]
- inv_soft_mask = inv_soft_mask[:, :, None]
-
- # parse mask
- if self.use_parse:
- # inference
- face_input = cv2.resize(restored_face, (512, 512), interpolation=cv2.INTER_LINEAR)
- face_input = img2tensor(face_input.astype('float32') / 255., bgr2rgb=True, float32=True)
- normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
- face_input = torch.unsqueeze(face_input, 0).to(self.device)
- with torch.no_grad():
- out = self.face_parse(face_input)[0]
- out = out.argmax(dim=1).squeeze().cpu().numpy()
-
- parse_mask = np.zeros(out.shape)
- MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
- for idx, color in enumerate(MASK_COLORMAP):
- parse_mask[out == idx] = color
- # blur the mask
- parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
- parse_mask = cv2.GaussianBlur(parse_mask, (101, 101), 11)
- # remove the black borders
- thres = 10
- parse_mask[:thres, :] = 0
- parse_mask[-thres:, :] = 0
- parse_mask[:, :thres] = 0
- parse_mask[:, -thres:] = 0
- parse_mask = parse_mask / 255.
-
- parse_mask = cv2.resize(parse_mask, face_size)
- parse_mask = cv2.warpAffine(parse_mask, inverse_affine, (w_up, h_up), flags=3)
- inv_soft_parse_mask = parse_mask[:, :, None]
- # pasted_face = inv_restored
- fuse_mask = (inv_soft_parse_mask < inv_soft_mask).astype('int')
- inv_soft_mask = inv_soft_parse_mask * fuse_mask + inv_soft_mask * (1 - fuse_mask)
-
- if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4: # alpha channel
- alpha = upsample_img[:, :, 3:]
- upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
- upsample_img = np.concatenate((upsample_img, alpha), axis=2)
- else:
- upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img
-
- if np.max(upsample_img) > 256: # 16-bit image
- upsample_img = upsample_img.astype(np.uint16)
- else:
- upsample_img = upsample_img.astype(np.uint8)
-
- # draw bounding box
- if draw_box:
- # upsample_input_img = cv2.resize(input_img, (w_up, h_up))
- img_color = np.ones([*upsample_img.shape], dtype=np.float32)
- img_color[:,:,0] = 0
- img_color[:,:,1] = 255
- img_color[:,:,2] = 0
- for inv_mask_border in inv_mask_borders:
- upsample_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_img
- # upsample_input_img = inv_mask_border * img_color + (1 - inv_mask_border) * upsample_input_img
-
- if save_path is not None:
- path = os.path.splitext(save_path)[0]
- save_path = f'{path}.{self.save_ext}'
- imwrite(upsample_img, save_path)
- return upsample_img
-
- def clean_all(self):
- self.all_landmarks_5 = []
- self.restored_faces = []
- self.affine_matrices = []
- self.cropped_faces = []
- self.inverse_affine_matrices = []
- self.det_faces = []
- self.pad_input_imgs = []
\ No newline at end of file
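The core of the helper above is the 5-point alignment: estimate a similarity transform from the detected landmarks to the FFHQ template with `cv2.estimateAffinePartial2D` (using LMEDS, as the comment above notes), warp the crop, and keep the inverse transform for pasting back. A minimal sketch of just that step, assuming `landmarks_5` is a `(5, 2)` float array from a detector; the `align_face` wrapper is hypothetical.

```python
import cv2
import numpy as np

# The standard FFHQ 5-point template for a 512x512 crop, as defined above.
FACE_TEMPLATE_512 = np.array([
    [192.98138, 239.94708],   # left eye
    [318.90277, 240.1936],    # right eye
    [256.63416, 314.01935],   # nose tip
    [201.26117, 371.41043],   # left mouth corner
    [313.08905, 371.15118],   # right mouth corner
])

def align_face(img: np.ndarray, landmarks_5: np.ndarray, face_size: int = 512):
    """Warp `img` so `landmarks_5` lands on the template; return the crop
    plus the forward and inverse affine transforms."""
    template = FACE_TEMPLATE_512 * (face_size / 512.0)
    # cv2.LMEDS mirrors the skimage similarity-transform estimate (see above).
    affine, _ = cv2.estimateAffinePartial2D(landmarks_5, template, method=cv2.LMEDS)
    cropped = cv2.warpAffine(img, affine, (face_size, face_size),
                             borderValue=(135, 133, 132))  # gray padding
    inverse_affine = cv2.invertAffineTransform(affine)
    return cropped, affine, inverse_affine
```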
diff --git a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/__init__.py b/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/__init__.py
deleted file mode 100644
index a3c197bb932cfc9cf3447b7a3b52ce76db262fc9..0000000000000000000000000000000000000000
--- a/spaces/Freiburg-AI-Research/dermoscopic_image_generation/glide_text2im/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-A codebase for performing model inference with a text-conditional diffusion model.
-"""
diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py
deleted file mode 100644
index 230181cbeeb9c070dad926892f62d8f482d0ab1e..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://detectron2/resnet101_caffe',
- backbone=dict(depth=101))
diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py
deleted file mode 100644
index 1420b97a4bd0dc0f5451623697666012a2de635c..0000000000000000000000000000000000000000
--- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py
+++ /dev/null
@@ -1,9 +0,0 @@
-_base_ = [
- '../_base_/models/deeplabv3plus_r50-d8.py',
- '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
- '../_base_/schedules/schedule_80k.py'
-]
-model = dict(
- decode_head=dict(align_corners=True),
- auxiliary_head=dict(align_corners=True),
- test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/streaming.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/streaming.py
deleted file mode 100644
index fba06936294ca15d72acd2d44f9dbda39a638107..0000000000000000000000000000000000000000
--- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/streaming.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Streaming module API that should be implemented by all streaming components.
-"""
-
-from contextlib import contextmanager
-import typing as tp
-from torch import nn
-import torch
-
-
-State = tp.Dict[str, torch.Tensor]
-
-
-class StreamingModule(nn.Module):
- """Common API for streaming components.
-
- Each streaming component has a streaming state, which is just a dict[str, Tensor].
- By convention, the first dim of each tensor must be the batch size.
- Don't use dots in the key names, as this would clash with submodules
- (like in state_dict).
-
- If `self._is_streaming` is True, the component should use and remember
- the proper state inside `self._streaming_state`.
-
- To set a streaming component in streaming state, use
-
- with module.streaming():
- ...
-
- This will automatically reset the streaming state when exiting the context manager.
- This also automatically propagates to all streaming children module.
-
- Some modules might also implement the `StreamingModule.flush` method, although
- this one is trickier, as all parent modules must be StreamingModule and implement
- it as well for it to work properly. See `StreamingSequential` below.
- """
- def __init__(self) -> None:
- super().__init__()
- self._streaming_state: State = {}
- self._is_streaming = False
-
- def _apply_named_streaming(self, fn: tp.Any):
- for name, module in self.named_modules():
- if isinstance(module, StreamingModule):
- fn(name, module)
-
- def _set_streaming(self, streaming: bool):
- def _set_streaming(name, module):
- module._is_streaming = streaming
- self._apply_named_streaming(_set_streaming)
-
- @contextmanager
- def streaming(self):
- """Context manager to enter streaming mode. Reset streaming state on exit."""
- self._set_streaming(True)
- try:
- yield
- finally:
- self._set_streaming(False)
- self.reset_streaming()
-
- def reset_streaming(self):
- """Reset the streaming state."""
- def _reset(name: str, module: StreamingModule):
- module._streaming_state.clear()
-
- self._apply_named_streaming(_reset)
-
- def get_streaming_state(self) -> State:
- """Return the streaming state, including that of sub-modules."""
- state: State = {}
-
- def _add(name: str, module: StreamingModule):
- if name:
- name += "."
- for key, value in module._streaming_state.items():
- state[name + key] = value
-
- self._apply_named_streaming(_add)
- return state
-
- def set_streaming_state(self, state: State):
- """Set the streaming state, including that of sub-modules."""
- state = dict(state)
-
- def _set(name: str, module: StreamingModule):
- if name:
- name += "."
- module._streaming_state.clear()
- for key, value in list(state.items()):
- # complexity is not ideal here, but probably fine.
- if key.startswith(name):
- local_key = key[len(name):]
- if '.' not in local_key:
- module._streaming_state[local_key] = value
- del state[key]
-
- self._apply_named_streaming(_set)
- assert len(state) == 0, list(state.keys())
-
- def flush(self, x: tp.Optional[torch.Tensor] = None):
- """Flush any remaining outputs that were waiting for completion.
- Typically, for convolutions, this will add the final padding
- and process the last buffer.
-
- This should take an optional argument `x`, which will be provided
- if a module before this one in the streaming pipeline has already
- spat out a flushed buffer.
- """
- if x is None:
- return None
- else:
- return self(x)
-
-
-class StreamingSequential(StreamingModule, nn.Sequential):
- """A streaming compatible alternative of `nn.Sequential`.
- """
- def flush(self, x: tp.Optional[torch.Tensor] = None):
- for module in self:
- if isinstance(module, StreamingModule):
- x = module.flush(x)
- elif x is not None:
- x = module(x)
- return x
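The docstring above describes the contract; to make the state convention concrete, here is a hedged toy example. `StreamingCumSum` is a hypothetical module (not part of audiocraft) that carries a running total across chunks via `_streaming_state`, so that chunked streaming matches a single offline pass; it assumes the class above is importable as `audiocraft.modules.streaming`.

```python
import torch
from audiocraft.modules.streaming import StreamingModule  # the class above

class StreamingCumSum(StreamingModule):
    """Cumulative sum over time that carries a running total across chunks.
    The state tensor is batch-first, per the convention documented above."""
    def forward(self, x: torch.Tensor) -> torch.Tensor:  # x: [B, T]
        out = torch.cumsum(x, dim=-1)
        if self._is_streaming:
            carry = self._streaming_state.get('carry')
            if carry is not None:
                out = out + carry
            self._streaming_state['carry'] = out[:, -1:].detach()
        return out

m = StreamingCumSum()
x = torch.ones(1, 6)
full = m(x)                # offline: a single pass
with m.streaming():        # online: two chunks, state carried in between
    a = m(x[:, :3])
    b = m(x[:, 3:])
assert torch.allclose(full, torch.cat([a, b], dim=-1))
```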
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py
deleted file mode 100644
index c2bd16efb530af5af3f72ab0edb3044b4e9fcd5c..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/wav2vec/unsupervised/scripts/normalize_and_filter_text.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import fasttext as ft
-import os
-import regex
-import sys
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description="reads text from stdin and outputs normalized, lid-filtered version to stdout"
- )
- parser.add_argument(
- "--fasttext-model",
- help="path to fasttext model",
- default="lid.187.bin",
- )
- parser.add_argument("--lang", help="language id", required=True)
- parser.add_argument(
- "--lid-threshold",
- type=float,
- help="threshold for this lang id probability",
- default=0.4,
- )
-
- return parser
-
-
-def main():
- parser = get_parser()
- args = parser.parse_args()
- filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")
-
- lg = args.lang.lower()
- lg_label = f"__label__{lg}"
- thresh = args.lid_threshold
-
- if os.path.exists(args.fasttext_model):
- model = ft.load_model(args.fasttext_model)
- else:
- print(
- f"fasttext language id model {args.fasttext_model} not found. Proceeding without language filtering. "
- f"To enable language filtering, please download the latest language id model "
- f"from https://fasttext.cc/docs/en/language-identification.html",
- file=sys.stderr,
- )
- model = None
-
- for line in sys.stdin:
- line = line.strip()
- line = filter_r.sub(" ", line)
- line = " ".join(line.split())
-
- if model is not None:
- lid, prob = model.predict(line, k=100)
- try:
- target_idx = lid.index(lg_label)
- except ValueError:
- continue
- if target_idx == 0 or prob[target_idx] >= thresh:
- print(line)
- else:
- print(line)
-
-
-if __name__ == "__main__":
- main()
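Before any language-id filtering, the script above normalizes each line with a Unicode-property regex: everything except letters, digits, combining marks, apostrophes, spaces, and hyphens is replaced by a space, and whitespace runs are collapsed. A small self-contained sketch of just that normalization; the `normalize` helper is illustrative.

```python
import regex  # the third-party `regex` package; stdlib `re` lacks \p{...} classes

# Same pattern as above: keep letters, digits, marks, apostrophes, spaces, hyphens.
filter_r = regex.compile(r"[^\p{L}\p{N}\p{M}\' \-]")

def normalize(line: str) -> str:
    line = filter_r.sub(" ", line.strip())
    return " ".join(line.split())  # collapse runs of whitespace

print(normalize("Héllo, wörld!! (test) 123"))  # -> Héllo wörld test 123
```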
diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/utils/prepare_iitm_data_tts.py b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/utils/prepare_iitm_data_tts.py
deleted file mode 100644
index 9a9bb04e2e0c44fb274c150a7ecede394aa16822..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/utils/prepare_iitm_data_tts.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import os
-from glob import glob
-import re
-import string
-import random
-
-
-def replace_extra_chars(line):
- line = line.replace("(", "").replace(
- ")", ""
- ) # .replace('\u200d', ' ').replace('\ufeff', ' ').replace('\u200c', ' ').replace('\u200e', ' ')
- # line = line.replace('“', ' ').replace('”', ' ').replace(':', ' ')
-
- return line.strip()
-
-
-def write_txt(content, filename):
- with open(filename, "w+", encoding="utf-8") as f:
- f.write(content)
-
-
-def save_train_test_valid_split(annotations_txt, num_samples_valid, num_samples_test):
- with open(annotations_txt, encoding="utf-8") as f:
- all_lines = [line.strip() for line in f.readlines()]
- test_val_indices = random.sample(
- range(len(all_lines)), num_samples_valid + num_samples_test
- )
- valid_ix = test_val_indices[:num_samples_valid]
- test_ix = test_val_indices[num_samples_valid:]
- train = [line for i, line in enumerate(all_lines) if i not in test_val_indices]
- valid = [line for i, line in enumerate(all_lines) if i in valid_ix]
- test = [line for i, line in enumerate(all_lines) if i in test_ix]
-
- print(f"Num samples in train: {len(train)}")
- print(f"Num samples in valid: {len(valid)}")
- print(f"Num samples in test: {len(test)}")
-
- out_dir_path = "/".join(annotations_txt.split("/")[:-1])
- with open(os.path.join(out_dir_path, "train.txt"), "w+", encoding="utf-8") as f:
- for line in train:
- print(line, file=f)
- with open(os.path.join(out_dir_path, "valid.txt"), "w+", encoding="utf-8") as f:
- for line in valid:
- print(line, file=f)
- with open(os.path.join(out_dir_path, "test.txt"), "w+", encoding="utf-8") as f:
- for line in test:
- print(line, file=f)
- print(f"train, test and valid txts saved in {out_dir_path}")
-
-
-def save_txts_from_txt_done_data(
- text_path,
- wav_path_for_annotations_txt,
- out_path_for_txts,
- num_samples_valid,
- num_samples_test,
-):
- outfile = os.path.join(out_path_for_txts, "annotations.txt")
- file_lines = open(text_path).read().splitlines()
- # print(file_lines[0])
-
- file_lines = [replace_extra_chars(line) for line in file_lines]
- # print(file_lines[0])
-
- fnames, ftexts = [], []
- for line in file_lines:
- elems = line.split('"')
- fnames.append(elems[0].strip())
- ftexts.append(elems[1].strip())
-
- all_chars = list(set("".join(ftexts)))
- punct_with_space = [i for i in all_chars if i in list(string.punctuation)] + [" "]
- chars = [i for i in all_chars if i not in punct_with_space if i.strip()]
- chars = "".join(chars)
- punct_with_space = "".join(punct_with_space)
- print(chars)
- print(punct_with_space)
-
- outfile_f = open(outfile, "w", encoding="utf-8")
- for f, t in zip(fnames, ftexts):
- print(
- os.path.join(wav_path_for_annotations_txt, f) + ".wav",
- t,
- sep="|",
- file=outfile_f,
- )
-
- write_txt(punct_with_space, os.path.join(out_path_for_txts, "punc.txt"))
- write_txt(chars, os.path.join(out_path_for_txts, "chars.txt"))
-
- save_train_test_valid_split(
- annotations_txt=outfile,
- num_samples_valid=num_samples_valid,
- num_samples_test=num_samples_test,
- )
-
-
-if __name__ == "__main__":
- text_path = "path/to/txt.done.data"
- out_path_for_txts = "vakyansh-tts/data/training/"
- wav_path_for_annotations_txt = "vakyansh-tts/data/training/wav_16K"
- num_samples_valid = 400
- num_samples_test = 50
- save_txts_from_txt_done_data(
- text_path,
- wav_path_for_annotations_txt,
- out_path_for_txts,
- num_samples_valid,
- num_samples_test,
- )
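The split above draws the validation and test indices jointly with a single `random.sample` call, so the two held-out sets are disjoint by construction, and everything else becomes train. The same logic as a standalone sketch; the `split_indices` helper and the explicit seed are illustrative additions (the sibling glow script below does seed with 42).

```python
import random

def split_indices(n: int, num_valid: int, num_test: int, seed: int = 42):
    """Sample disjoint valid/test index sets from `n` lines; the remainder
    is train (mirrors save_train_test_valid_split above)."""
    rng = random.Random(seed)
    held_out = rng.sample(range(n), num_valid + num_test)
    valid, test = set(held_out[:num_valid]), set(held_out[num_valid:])
    train = [i for i in range(n) if i not in valid and i not in test]
    return train, sorted(valid), sorted(test)

train, valid, test = split_indices(n=1000, num_valid=400, num_test=50)
assert len(train) + len(valid) + len(test) == 1000
```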
diff --git a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/utils/glow/prepare_iitm_data_glow_en.py b/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/utils/glow/prepare_iitm_data_glow_en.py
deleted file mode 100644
index 827bdc98f2d84090cc445d786ff8fc1e5ff3d829..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Odia-TTS/ttsv/utils/glow/prepare_iitm_data_glow_en.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import os
-from glob import glob
-import re
-import string
-import argparse
-import json
-import random
-random.seed(42)
-
-def replace_extra_chars(line):
- line = line.replace("(", "").replace(
- ")", ""
- ) # .replace('\u200d', ' ').replace('\ufeff', ' ').replace('\u200c', ' ').replace('\u200e', ' ')
- # line = line.replace('“', ' ').replace('”', ' ').replace(':', ' ')
-
- return line.strip()
-
-
-def write_txt(content, filename):
- with open(filename, "w+", encoding="utf-8") as f:
- f.write(content)
-
-
-def save_train_test_valid_split(annotations_txt, num_samples_valid, num_samples_test):
- with open(annotations_txt, encoding="utf-8") as f:
- all_lines = [line.strip() for line in f.readlines()]
- test_val_indices = random.sample(
- range(len(all_lines)), num_samples_valid + num_samples_test
- )
- valid_ix = test_val_indices[:num_samples_valid]
- test_ix = test_val_indices[num_samples_valid:]
- train = [line for i, line in enumerate(all_lines) if i not in test_val_indices]
- valid = [line for i, line in enumerate(all_lines) if i in valid_ix]
- test = [line for i, line in enumerate(all_lines) if i in test_ix]
-
- print(f"Num samples in train: {len(train)}")
- print(f"Num samples in valid: {len(valid)}")
- print(f"Num samples in test: {len(test)}")
-
- out_dir_path = "/".join(annotations_txt.split("/")[:-1])
- with open(os.path.join(out_dir_path, "train.txt"), "w+", encoding="utf-8") as f:
- for line in train:
- print(line, file=f)
- with open(os.path.join(out_dir_path, "valid.txt"), "w+", encoding="utf-8") as f:
- for line in valid:
- print(line, file=f)
- with open(os.path.join(out_dir_path, "test.txt"), "w+", encoding="utf-8") as f:
- for line in test:
- print(line, file=f)
- print(f"train, test and valid txts saved in {out_dir_path}")
-
-
-def save_txts_from_txt_done_data(
- text_path,
- wav_path_for_annotations_txt,
- out_path_for_txts,
- num_samples_valid,
- num_samples_test,
-):
- outfile = os.path.join(out_path_for_txts, "annotations.txt")
- with open(text_path) as file:
- file_lines = file.readlines()
-
- # print(file_lines[0])
-
- file_lines = [replace_extra_chars(line) for line in file_lines]
- # print(file_lines[0])
-
- fnames, ftexts = [], []
- for line in file_lines:
- elems = line.split('"')
- fnames.append(elems[0].strip())
- ftexts.append(elems[1].strip().lower().replace('‘','\'').replace('’','\''))
-
- all_chars = list(set("".join(ftexts)))
- punct_with_space = [i for i in all_chars if i in list(string.punctuation)] + [" "]
- chars = [i for i in all_chars if i not in punct_with_space if i.strip()]
- chars = "".join(chars)
- punct_with_space = "".join(punct_with_space)#.replace("'",r"\'")
-
- with open('../../config/glow/base_blank.json', 'r') as jfile:
- json_config = json.load(jfile)
-
- json_config["data"]["chars"] = chars
- json_config["data"]["punc"] = punct_with_space
- json_config["data"]["training_files"]=out_path_for_txts + '/train.txt'
- json_config["data"]["validation_files"] = out_path_for_txts + '/valid.txt'
- new_config_name = out_path_for_txts.split('/')[-1]
- with open(f'../../config/glow/{new_config_name}.json','w+') as jfile:
- json.dump(json_config, jfile)
-
- print(f"Characters: {chars}")
- print(f"Len of vocab: {len(chars)}")
- print(f"Punctuation: {punct_with_space}")
- print(f"Config file is stored at ../../config/glow/{new_config_name}.json")
-
- outfile_f = open(outfile, "w+", encoding="utf-8")
- for f, t in zip(fnames, ftexts):
- print(
- os.path.join(wav_path_for_annotations_txt, f) + ".wav",
- t,
- sep="|",
- file=outfile_f,
- )
- outfile_f.close()
- write_txt(punct_with_space, os.path.join(out_path_for_txts, "punc.txt"))
- write_txt(chars, os.path.join(out_path_for_txts, "chars.txt"))
-
- save_train_test_valid_split(
- annotations_txt=outfile,
- num_samples_valid=num_samples_valid,
- num_samples_test=num_samples_test,
- )
-
-
-
-
-if __name__ == "__main__":
-
-
- parser = argparse.ArgumentParser()
- parser.add_argument("-i", "--text-path", type=str, required=True)
- parser.add_argument("-o", "--output-path", type=str, required=True)
- parser.add_argument("-w", "--wav-path", type=str, required=True)
- parser.add_argument("-v", "--valid-samples", type=int, default = 100)
- parser.add_argument("-t", "--test-samples", type=int, default = 10)
- args = parser.parse_args()
-
- save_txts_from_txt_done_data(
- args.text_path,
- args.wav_path,
- args.output_path,
- args.valid_samples,
- args.test_samples,
- )
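Both preparation scripts derive the model vocabulary the same way: collect the corpus alphabet, peel off ASCII punctuation plus the space character, and keep the rest as `chars` (later written to `chars.txt` and `punc.txt`). A minimal sketch of that split; the `build_vocab` helper is hypothetical.

```python
import string

def build_vocab(texts):
    """Split the corpus alphabet into characters vs punctuation-with-space,
    as the scripts above do before writing chars.txt / punc.txt."""
    all_chars = list(set("".join(texts)))
    punct_with_space = [c for c in all_chars if c in string.punctuation] + [" "]
    chars = [c for c in all_chars if c not in punct_with_space and c.strip()]
    return "".join(chars), "".join(punct_with_space)

chars, punc = build_vocab(["hello, world!", "it's fine."])
print(sorted(chars), repr(punc))
```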
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.5cfaf6ac.js b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.5cfaf6ac.js
deleted file mode 100644
index a333218ffca2abe6c116c7dea63fe2146a9ee94a..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.5cfaf6ac.js
+++ /dev/null
@@ -1,2 +0,0 @@
-import{S as Z,i as A,s as D,w as R,b as d,f as v,g as b,x as H,n as h,B as z,e as g,a as j,t as N,Y as S,h as E,C as K,d as C,P as O,c as L,m as M,j as k,k as p,o as B,F as Q,R as W,T as X,U as x,V as $,D as T,E as Y}from"./index.396f4a72.js";import{B as ee}from"./BlockLabel.37da86a3.js";function le(a){let e,n;return{c(){e=R("svg"),n=R("path"),d(n,"fill","currentColor"),d(n,"d","M4 2H2v26a2 2 0 0 0 2 2h26v-2H4v-3h22v-8H4v-4h14V5H4Zm20 17v4H4v-4ZM16 7v4H4V7Z"),d(e,"xmlns","http://www.w3.org/2000/svg"),d(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),d(e,"aria-hidden","true"),d(e,"role","img"),d(e,"class","iconify iconify--carbon"),d(e,"width","100%"),d(e,"height","100%"),d(e,"preserveAspectRatio","xMidYMid meet"),d(e,"viewBox","0 0 32 32")},m(l,t){v(l,e,t),b(e,n)},p:H,i:H,o:H,d(l){l&&h(e)}}}class G extends Z{constructor(e){super(),A(this,e,null,le,D,{})}}function q(a,e,n){const l=a.slice();return l[3]=e[n],l}function F(a){let e,n=a[0].confidences,l=[];for(let t=0;t{"value"in i&&n(0,l=i.value),"show_label"in i&&n(1,t=i.show_label),"color"in i&&n(2,o=i.color)},[l,t,o]}class oe extends Z{constructor(e){super(),A(this,e,ne,te,D,{value:0,show_label:1,color:2})}}function U(a){let e,n;return e=new ee({props:{Icon:G,label:a[4],disable:typeof a[5].container=="boolean"&&!a[5].container}}),{c(){L(e.$$.fragment)},m(l,t){M(e,l,t),n=!0},p(l,t){const o={};t&16&&(o.label=l[4]),t&32&&(o.disable=typeof l[5].container=="boolean"&&!l[5].container),e.$set(o)},i(l){n||(k(e.$$.fragment,l),n=!0)},o(l){p(e.$$.fragment,l),n=!1},d(l){B(e,l)}}}function ie(a){let e,n,l,t;return l=new G({}),{c(){e=g("div"),n=g("div"),L(l.$$.fragment),d(n,"class","h-5 dark:text-white opacity-50"),d(e,"class","h-full min-h-[6rem] flex justify-center items-center")},m(o,i){v(o,e,i),b(e,n),M(l,n,null),t=!0},p:H,i(o){t||(k(l.$$.fragment,o),t=!0)},o(o){p(l.$$.fragment,o),t=!1},d(o){o&&h(e),B(l)}}}function ae(a){let e,n;return e=new oe({props:{value:a[3],show_label:a[7],color:a[2]}}),{c(){L(e.$$.fragment)},m(l,t){M(e,l,t),n=!0},p(l,t){const o={};t&8&&(o.value=l[3]),t&128&&(o.show_label=l[7]),t&4&&(o.color=l[2]),e.$set(o)},i(l){n||(k(e.$$.fragment,l),n=!0)},o(l){p(e.$$.fragment,l),n=!1},d(l){B(e,l)}}}function se(a){let e,n,l,t,o,i,s;const c=[a[6]];let w={};for(let r=0;r{u=null}),Y());let V=t;t=y(r),t===V?f[t].p(r,m):(T(),p(f[V],1,1,()=>{f[V]=null}),Y(),o=f[t],o?o.p(r,m):(o=f[t]=_[t](r),o.c()),k(o,1),o.m(i.parentNode,i))},i(r){s||(k(e.$$.fragment,r),k(u),k(o),s=!0)},o(r){p(e.$$.fragment,r),p(u),p(o),s=!1},d(r){B(e,r),r&&h(n),u&&u.d(r),r&&h(l),f[t].d(r),r&&h(i)}}}function fe(a){let e,n;return e=new O({props:{test_id:"label",visible:a[1],elem_id:a[0],disable:typeof a[5].container=="boolean"&&!a[5].container,$$slots:{default:[se]},$$scope:{ctx:a}}}),{c(){L(e.$$.fragment)},m(l,t){M(e,l,t),n=!0},p(l,[t]){const o={};t&2&&(o.visible=l[1]),t&1&&(o.elem_id=l[0]),t&32&&(o.disable=typeof l[5].container=="boolean"&&!l[5].container),t&764&&(o.$$scope={dirty:t,ctx:l}),e.$set(o)},i(l){n||(k(e.$$.fragment,l),n=!0)},o(l){p(e.$$.fragment,l),n=!1},d(l){B(e,l)}}}function re(a,e,n){let{elem_id:l=""}=e,{visible:t=!0}=e,{color:o=void 0}=e,{value:i}=e,{label:s="Label"}=e,{style:c={}}=e,{loading_status:w}=e,{show_label:u}=e;const _=Q();return a.$$set=f=>{"elem_id"in f&&n(0,l=f.elem_id),"visible"in f&&n(1,t=f.visible),"color"in f&&n(2,o=f.color),"value"in f&&n(3,i=f.value),"label"in f&&n(4,s=f.label),"style"in f&&n(5,c=f.style),"loading_status"in f&&n(6,w=f.loading_status),"show_label"in 
f&&n(7,u=f.show_label)},a.$$.update=()=>{a.$$.dirty&8&&_("change")},[l,t,o,i,s,c,w,u]}class ce extends Z{constructor(e){super(),A(this,e,re,fe,D,{elem_id:0,visible:1,color:2,value:3,label:4,style:5,loading_status:6,show_label:7})}}var _e=ce;const be=["static"],me=a=>({type:"{ label: string; confidences?: Array<{ label: string; confidence: number }>",description:"output label and optional set of confidences per label"});export{_e as Component,me as document,be as modes};
-//# sourceMappingURL=index.5cfaf6ac.js.map
diff --git a/spaces/HugoDzz/super-godot-galaxy/README.md b/spaces/HugoDzz/super-godot-galaxy/README.md
deleted file mode 100644
index 5e3448e05ad3ea2032ffc8dc693ea38d2819af49..0000000000000000000000000000000000000000
--- a/spaces/HugoDzz/super-godot-galaxy/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: Super Godot Galaxy
-emoji: 💫
-colorFrom: purple
-colorTo: purple
-sdk: static
-pinned: true
-license: mit
-app_file: build/index.html
-custom_headers:
- cross-origin-embedder-policy: require-corp
- cross-origin-opener-policy: same-origin
- cross-origin-resource-policy: cross-origin
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/models/transformer/__init__.py b/spaces/ICML2022/OFA/fairseq/fairseq/models/transformer/__init__.py
deleted file mode 100644
index 681fca3d4553f6832a65f61fc186793bc4ee0679..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/models/transformer/__init__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) Facebook Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""isort:skip_file"""
-
-from .transformer_config import (
- TransformerConfig,
- DEFAULT_MAX_SOURCE_POSITIONS,
- DEFAULT_MAX_TARGET_POSITIONS,
- DEFAULT_MIN_PARAMS_TO_WRAP,
-)
-from .transformer_decoder import TransformerDecoder, TransformerDecoderBase, Linear
-from .transformer_encoder import TransformerEncoder, TransformerEncoderBase
-from .transformer_legacy import (
- TransformerModel,
- base_architecture,
- tiny_architecture,
- transformer_iwslt_de_en,
- transformer_wmt_en_de,
- transformer_vaswani_wmt_en_de_big,
- transformer_vaswani_wmt_en_fr_big,
- transformer_wmt_en_de_big,
- transformer_wmt_en_de_big_t2t,
-)
-from .transformer_base import TransformerModelBase, Embedding
-
-
-__all__ = [
- "TransformerModelBase",
- "TransformerConfig",
- "TransformerDecoder",
- "TransformerDecoderBase",
- "TransformerEncoder",
- "TransformerEncoderBase",
- "TransformerModel",
- "Embedding",
- "Linear",
- "base_architecture",
- "tiny_architecture",
- "transformer_iwslt_de_en",
- "transformer_wmt_en_de",
- "transformer_vaswani_wmt_en_de_big",
- "transformer_vaswani_wmt_en_fr_big",
- "transformer_wmt_en_de_big",
- "transformer_wmt_en_de_big_t2t",
- "DEFAULT_MAX_SOURCE_POSITIONS",
- "DEFAULT_MAX_TARGET_POSITIONS",
- "DEFAULT_MIN_PARAMS_TO_WRAP",
-]
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py b/spaces/ICML2022/OFA/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
deleted file mode 100644
index 4d5547c39b14f62acbd4f4b9ab3abfb3009c0e6d..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/optim/lr_scheduler/tri_stage_lr_scheduler.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-from typing import Optional, List, Tuple
-from omegaconf import II
-
-from fairseq.dataclass import FairseqDataclass
-from fairseq.optim.lr_scheduler import FairseqLRScheduler, register_lr_scheduler
-
-
-@dataclass
-class TriStageLRScheduleConfig(FairseqDataclass):
- warmup_steps: int = field(
- default=0,
- metadata={"help": "warmup the learning rate linearly for the first N updates"},
- )
- hold_steps: int = field(
- default=0,
- metadata={"help": "steps in hold stage"},
- )
- decay_steps: int = field(
- default=0,
- metadata={"help": "steps in decay stages"},
- )
- phase_ratio: Optional[Tuple[float, float, float]] = field(
- default=None,
- metadata={
- "help": (
- "if set, automatically sets warmup/hold/decay steps to the ratio "
- "specified here from max_updates. the ratios must add up to 1.0"
- )
- },
- )
- init_lr_scale: float = field(
- default=0.01,
- metadata={"help": "initial learning rate scale during warmup phase"},
- )
- final_lr_scale: float = field(
- default=0.01,
- metadata={"help": "final learning rate scale"},
- )
- max_update: float = II("optimization.max_update")
- lr: List[float] = II("optimization.lr")
-
-
-@register_lr_scheduler("tri_stage", dataclass=TriStageLRScheduleConfig)
-class TriStageLRSchedule(FairseqLRScheduler):
- """Tristage learning rate schedulr
-
- Implement the learning rate scheduler in https://arxiv.org/pdf/1904.08779.pdf
-
-    Similar to the inverse_square_root scheduler, but tri_stage employs
-    three-stage LR scheduling:
-
- - warmup stage, starting from `lr` * `init_lr_scale`, linearly
- increased to `lr` in `warmup_steps` iterations
-
- - hold stage, after `warmup_steps`, keep the LR as `lr` for `hold_steps`
- iterations
-
-    - decay stage, after hold stage, decay LR exponentially to
-      `lr` * `final_lr_scale` in `decay_steps`;
-      after that the LR is kept at `final_lr_scale` * `lr`
-
- During warmup::
-
- init_lr = cfg.init_lr_scale * cfg.lr
- lrs = torch.linspace(init_lr, cfg.lr, cfg.warmup_steps)
- lr = lrs[update_num]
-
- During hold::
-
- lr = cfg.lr
-
- During decay::
-
- decay_factor = - math.log(cfg.final_lr_scale) / cfg.decay_steps
-        lr = cfg.lr * exp(- (update_num - warmup_steps - hold_steps) * decay_factor)
-
- After that::
-
- lr = cfg.lr * cfg.final_lr_scale
- """
-
- def __init__(self, cfg: TriStageLRScheduleConfig, optimizer):
- super().__init__(cfg, optimizer)
- if len(cfg.lr) > 1:
- raise ValueError(
- "Cannot use a fixed learning rate schedule with tri-stage lr."
- " Consider --lr-scheduler=fixed instead."
- )
-
- # calculate LR at each point
- self.peak_lr = cfg.lr[0]
- self.init_lr = cfg.init_lr_scale * cfg.lr[0]
- self.final_lr = cfg.final_lr_scale * cfg.lr[0]
-
- if cfg.phase_ratio is not None:
- assert cfg.max_update > 0
- assert sum(cfg.phase_ratio) == 1, "phase ratios must add up to 1"
- self.warmup_steps = int(cfg.max_update * cfg.phase_ratio[0])
- self.hold_steps = int(cfg.max_update * cfg.phase_ratio[1])
- self.decay_steps = int(cfg.max_update * cfg.phase_ratio[2])
- else:
- self.warmup_steps = cfg.warmup_steps
- self.hold_steps = cfg.hold_steps
- self.decay_steps = cfg.decay_steps
-
- assert (
- self.warmup_steps + self.hold_steps + self.decay_steps > 0
- ), "please specify steps or phase_ratio"
-
- self.warmup_rate = (
- (self.peak_lr - self.init_lr) / self.warmup_steps
- if self.warmup_steps != 0
- else 0
- )
- self.decay_factor = -math.log(cfg.final_lr_scale) / self.decay_steps
-
- # initial learning rate
- self.lr = self.init_lr
- self.optimizer.set_lr(self.lr)
-
- def _decide_stage(self, update_step):
- """
- return stage, and the corresponding steps within the current stage
- """
- if update_step < self.warmup_steps:
- # warmup state
- return 0, update_step
-
- offset = self.warmup_steps
-
- if update_step < offset + self.hold_steps:
- # hold stage
- return 1, update_step - offset
-
- offset += self.hold_steps
-
- if update_step <= offset + self.decay_steps:
- # decay stage
- return 2, update_step - offset
-
- offset += self.decay_steps
-
-        # still here? constant LR stage
- return 3, update_step - offset
-
- def step(self, epoch, val_loss=None):
- """Update the learning rate at the end of the given epoch."""
- super().step(epoch, val_loss)
- # we don't change the learning rate at epoch boundaries
- return self.optimizer.get_lr()
-
- def step_update(self, num_updates):
- """Update the learning rate after each update."""
- stage, steps_in_stage = self._decide_stage(num_updates)
- if stage == 0:
- self.lr = self.init_lr + self.warmup_rate * steps_in_stage
- elif stage == 1:
- self.lr = self.peak_lr
- elif stage == 2:
- self.lr = self.peak_lr * math.exp(-self.decay_factor * steps_in_stage)
- elif stage == 3:
- self.lr = self.final_lr
- else:
- raise ValueError("Undefined stage")
-
- self.optimizer.set_lr(self.lr)
-
- return self.lr
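
A minimal standalone sketch of the schedule implemented above (not part of fairseq; the peak LR, scales, and step counts are illustrative values):

    import math

    def tri_stage_lr(step, peak_lr=1e-3, init_scale=0.01, final_scale=0.01,
                     warmup=1000, hold=2000, decay=4000):
        init_lr, final_lr = init_scale * peak_lr, final_scale * peak_lr
        if step < warmup:                       # linear warmup
            return init_lr + (peak_lr - init_lr) * step / warmup
        step -= warmup
        if step < hold:                         # hold at peak
            return peak_lr
        step -= hold
        if step < decay:                        # exponential decay
            return peak_lr * math.exp(math.log(final_scale) * step / decay)
        return final_lr                         # constant tail

    assert abs(tri_stage_lr(0) - 1e-5) < 1e-12   # starts at init_lr
    assert tri_stage_lr(1500) == 1e-3            # stays at peak during hold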
diff --git a/spaces/ICML2022/resefa/utils/visualizers/__init__.py b/spaces/ICML2022/resefa/utils/visualizers/__init__.py
deleted file mode 100644
index df9fbaca361c3802c0f8221c053b61bf66a2456f..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/resefa/utils/visualizers/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# python3.7
-"""Collects all visualizers."""
-
-from .grid_visualizer import GridVisualizer
-from .gif_visualizer import GifVisualizer
-from .html_visualizer import HtmlVisualizer
-from .html_visualizer import HtmlReader
-from .video_visualizer import VideoVisualizer
-from .video_visualizer import VideoReader
-
-__all__ = [
- 'GridVisualizer', 'GifVisualizer', 'HtmlVisualizer', 'HtmlReader',
- 'VideoVisualizer', 'VideoReader'
-]
diff --git a/spaces/Ignahugging/Plants_classification/README.md b/spaces/Ignahugging/Plants_classification/README.md
deleted file mode 100644
index 4a0d55a0ef463f16816b979840785872edb4d59e..0000000000000000000000000000000000000000
--- a/spaces/Ignahugging/Plants_classification/README.md
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: Plants_classification
-emoji: 📈
-colorFrom: green
-colorTo: purple
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`models`: _List[string]_
-HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`datasets`: _List[string]_
-HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
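
For concreteness, a hypothetical front matter combining the keys documented above (the `models` and `datasets` IDs are illustrative, and both keys are optional):

---
title: Plants_classification
emoji: 📈
colorFrom: green
colorTo: purple
sdk: gradio
app_file: app.py
models: ["microsoft/resnet-50"]
datasets: ["imagenet-1k"]
pinned: false
---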
diff --git a/spaces/JUNGU/VToonify/vtoonify/model/raft/core/corr.py b/spaces/JUNGU/VToonify/vtoonify/model/raft/core/corr.py
deleted file mode 100644
index 40214aa5e6f0392a732eacab9d9cb0cbfb4669f3..0000000000000000000000000000000000000000
--- a/spaces/JUNGU/VToonify/vtoonify/model/raft/core/corr.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import torch
-import torch.nn.functional as F
-from model.raft.core.utils.utils import bilinear_sampler, coords_grid
-
-try:
- import alt_cuda_corr
-except ImportError:
- # alt_cuda_corr is not compiled
- pass
-
-
-class CorrBlock:
- def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
- self.num_levels = num_levels
- self.radius = radius
- self.corr_pyramid = []
-
- # all pairs correlation
- corr = CorrBlock.corr(fmap1, fmap2)
-
- batch, h1, w1, dim, h2, w2 = corr.shape
- corr = corr.reshape(batch*h1*w1, dim, h2, w2)
-
- self.corr_pyramid.append(corr)
- for i in range(self.num_levels-1):
- corr = F.avg_pool2d(corr, 2, stride=2)
- self.corr_pyramid.append(corr)
-
- def __call__(self, coords):
- r = self.radius
- coords = coords.permute(0, 2, 3, 1)
- batch, h1, w1, _ = coords.shape
-
- out_pyramid = []
- for i in range(self.num_levels):
- corr = self.corr_pyramid[i]
- dx = torch.linspace(-r, r, 2*r+1, device=coords.device)
- dy = torch.linspace(-r, r, 2*r+1, device=coords.device)
- delta = torch.stack(torch.meshgrid(dy, dx), axis=-1)
-
- centroid_lvl = coords.reshape(batch*h1*w1, 1, 1, 2) / 2**i
- delta_lvl = delta.view(1, 2*r+1, 2*r+1, 2)
- coords_lvl = centroid_lvl + delta_lvl
-
- corr = bilinear_sampler(corr, coords_lvl)
- corr = corr.view(batch, h1, w1, -1)
- out_pyramid.append(corr)
-
- out = torch.cat(out_pyramid, dim=-1)
- return out.permute(0, 3, 1, 2).contiguous().float()
-
- @staticmethod
- def corr(fmap1, fmap2):
- batch, dim, ht, wd = fmap1.shape
- fmap1 = fmap1.view(batch, dim, ht*wd)
- fmap2 = fmap2.view(batch, dim, ht*wd)
-
- corr = torch.matmul(fmap1.transpose(1,2), fmap2)
- corr = corr.view(batch, ht, wd, 1, ht, wd)
- return corr / torch.sqrt(torch.tensor(dim).float())
-
-
-class AlternateCorrBlock:
- def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
- self.num_levels = num_levels
- self.radius = radius
-
- self.pyramid = [(fmap1, fmap2)]
- for i in range(self.num_levels):
- fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
- fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
- self.pyramid.append((fmap1, fmap2))
-
- def __call__(self, coords):
- coords = coords.permute(0, 2, 3, 1)
- B, H, W, _ = coords.shape
- dim = self.pyramid[0][0].shape[1]
-
- corr_list = []
- for i in range(self.num_levels):
- r = self.radius
- fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1).contiguous()
- fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1).contiguous()
-
- coords_i = (coords / 2**i).reshape(B, 1, H, W, 2).contiguous()
- corr, = alt_cuda_corr.forward(fmap1_i, fmap2_i, coords_i, r)
- corr_list.append(corr.squeeze(1))
-
- corr = torch.stack(corr_list, dim=1)
- corr = corr.reshape(B, -1, H, W)
- return corr / torch.sqrt(torch.tensor(dim).float())
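
A hedged usage sketch for CorrBlock (the shapes are the point; the feature maps here are random stand-ins for RAFT's 1/8-resolution encoder outputs):

    import torch

    fmap1 = torch.randn(2, 256, 46, 62)              # (batch, dim, H/8, W/8)
    fmap2 = torch.randn(2, 256, 46, 62)
    corr_fn = CorrBlock(fmap1, fmap2, num_levels=4, radius=4)

    # absolute pixel coordinates for every position, i.e. an all-zero flow
    ys, xs = torch.meshgrid(torch.arange(46.0), torch.arange(62.0))
    coords = torch.stack([xs, ys], dim=0)[None].repeat(2, 1, 1, 1)  # (B, 2, H, W)

    corr = corr_fn(coords)
    # one (2r+1)^2 lookup window per pyramid level, concatenated over channels
    assert corr.shape == (2, 4 * 9 * 9, 46, 62)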
diff --git a/spaces/Jasmine0725/text_generator/app.py b/spaces/Jasmine0725/text_generator/app.py
deleted file mode 100644
index 25e0e6e71af7c7766fc802ad69761426d9bbda4b..0000000000000000000000000000000000000000
--- a/spaces/Jasmine0725/text_generator/app.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import gradio as gr
-from gradio.mix import Parallel
-
-title="My First Text Generator"
-description="Input text."
-example=[
- ["Once upon a time"]
-]
-
-model1=gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
-model2=gr.Interface.load("huggingface/gpt2")
-model3=gr.Interface.load("huggingface/EleutherAI/gpt-neo-125M")
-
-Parallel(model1, model2, model3, examples=example, title=title, description=description).launch()
\ No newline at end of file
diff --git a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/version.py b/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/version.py
deleted file mode 100644
index 3c30a9a5d2c3af85b06034f080d6f9f7e0a53e7e..0000000000000000000000000000000000000000
--- a/spaces/Jasonyoyo/CodeFormer/CodeFormer/basicsr/version.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# GENERATED VERSION FILE
-# TIME: Sun Aug 7 15:14:26 2022
-__version__ = '1.3.2'
-__gitsha__ = '6f94023'
-version_info = (1, 3, 2)
diff --git a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/repo.py b/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/repo.py
deleted file mode 100644
index 2788de5b06a744bc436df677a973d89c26489a8a..0000000000000000000000000000000000000000
--- a/spaces/JohnSmith9982/ChuanhuChatGPT_Beta/modules/repo.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# -*- coding:utf-8 -*-
-import os
-import sys
-import subprocess
-from functools import lru_cache
-import logging
-import gradio as gr
-import datetime
-
-# This file is mainly used to describe repo version info and to execute git, pip, and shell commands.
-# Part of the code in this file is referenced from stable-diffusion-webui/modules/launch_utils.py
-
-python = sys.executable
-pip = os.environ.get('PIP', "pip")
-git = os.environ.get('GIT', "git")
-
-# Pypi index url
-index_url = os.environ.get('INDEX_URL', "")
-
-# Whether to default to printing command output
-default_command_live = True
-
-
-def run(command, desc=None, errdesc=None, custom_env=None, live: bool = default_command_live) -> str:
- if desc is not None:
- print(desc)
- run_kwargs = {
- "args": command,
- "shell": True,
- "env": os.environ if custom_env is None else custom_env,
- "encoding": 'utf8',
- "errors": 'ignore',
- }
-
- if not live:
- run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE
-
- result = subprocess.run(**run_kwargs)
- if result.returncode != 0:
- error_bits = [
- f"{errdesc or 'Error running command'}.",
- f"Command: {command}",
- f"Error code: {result.returncode}",
- ]
- if result.stdout:
- error_bits.append(f"stdout: {result.stdout}")
- if result.stderr:
- error_bits.append(f"stderr: {result.stderr}")
- raise RuntimeError("\n".join(error_bits))
-
- return (result.stdout or "")
-
-
-def run_pip(command, desc=None, pref=None, live=default_command_live):
- # if args.skip_install:
- # return
-
- index_url_line = f' --index-url {index_url}' if index_url != '' else ''
- return run(
- f'"{python}" -m pip {command} --prefer-binary{index_url_line}',
- desc=f"{pref} Installing {desc}...",
- errdesc=f"Couldn't install {desc}",
- live=live
- )
-
-
-@lru_cache()
-def commit_hash():
- try:
- return subprocess.check_output([git, "rev-parse", "HEAD"], shell=False, encoding='utf8').strip()
- except Exception:
- return ""
-
-def commit_html():
- commit = commit_hash()
- if commit != "":
- short_commit = commit[0:7]
- commit_info = f'{short_commit}'
- else:
- commit_info = "unknown \U0001F615"
- return commit_info
-
-@lru_cache()
-def tag_html():
- try:
- latest_tag = run(f"{git} describe --tags --abbrev=0", live=False).strip()
- try:
- # tag = subprocess.check_output([git, "describe", "--tags", "--exact-match"], shell=False, encoding='utf8').strip()
- tag = run(f"{git} describe --tags --exact-match", live=False).strip()
- except Exception:
- tag = ""
- except Exception:
- tag = ""
-
- if tag == "":
- tag_info = "unknown \U0001F615"
- elif tag == "":
- tag_info = f'{latest_tag}*'
- else:
- tag_info = f'{tag}'
-
- return tag_info
-
-def repo_tag_html():
- commit_version = commit_html()
- tag_version = tag_html()
- return tag_version if tag_version != "unknown \U0001F615" else commit_version
-
-def versions_html():
- python_version = ".".join([str(x) for x in sys.version_info[0:3]])
- repo_version = repo_tag_html()
- return f"""
- Python: {python_version}
- •
- Gradio: {gr.__version__}
- •
- ChuanhuChat: {repo_version}
- """
-
-def version_time():
- try:
- commit_time = subprocess.check_output(f"TZ=UTC {git} log -1 --format=%cd --date='format-local:%Y-%m-%dT%H:%M:%SZ'", shell=True, encoding='utf8').strip()
- # commit_time = run(f"TZ=UTC {git} log -1 --format=%cd --date='format-local:%Y-%m-%dT%H:%M:%SZ'").strip()
- except Exception:
- commit_time = "unknown"
- return commit_time
-
-
-
-def get_current_branch():
- try:
- # branch = run(f"{git} rev-parse --abbrev-ref HEAD").strip()
- branch = subprocess.check_output([git, "rev-parse", "--abbrev-ref", "HEAD"], shell=False, encoding='utf8').strip()
- except Exception:
- branch = ""
- return branch
-
-
-def get_latest_release():
- try:
- import requests
- release = requests.get("https://api.github.com/repos/GaiZhenbiao/ChuanhuChatGPT/releases/latest").json()
- tag = release["tag_name"]
- release_note = release["body"]
- need_pip = release_note.find("requirements reinstall needed") != -1
- except Exception:
- tag = ""
- release_note = ""
- need_pip = False
- return {"tag": tag, "release_note": release_note, "need_pip": need_pip}
-
-def get_tag_commit_hash(tag):
- try:
- import requests
- tags = requests.get("https://api.github.com/repos/GaiZhenbiao/ChuanhuChatGPT/tags").json()
- commit_hash = [x["commit"]["sha"] for x in tags if x["name"] == tag][0]
- except Exception:
- commit_hash = ""
- return commit_hash
-
-def repo_need_stash():
- try:
- return subprocess.check_output([git, "diff-index", "--quiet", "HEAD", "--"], shell=False, encoding='utf8').strip() != ""
- except Exception:
- return True
-
-def background_update():
- # {git} fetch --all && ({git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f || ({git} stash && {git} pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f && {git} stash pop)) && {pip} install -r requirements.txt")
- try:
- latest_release = get_latest_release()
- latest_release_tag = latest_release["tag"]
- latest_release_hash = get_tag_commit_hash(latest_release_tag)
- need_pip = latest_release["need_pip"]
- need_stash = repo_need_stash()
-
- timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
- current_branch = get_current_branch()
- updater_branch = f'tmp_{timestamp}'
- backup_branch = f'backup_{timestamp}'
- track_repo = "https://github.com/GaiZhenbiao/ChuanhuChatGPT.git"
- try:
- try:
- run(f"{git} fetch {track_repo}", desc="[Updater] Fetching from github...", live=False)
- except Exception:
- logging.error(f"Update failed in fetching, check your network connection")
- return "failed"
-
-        run(f'{git} stash push --include-untracked -m "updater-{timestamp}"',
-            desc=f'[Updater] Saving your local changes to stash updater-{timestamp}', live=False) if need_stash else None
-
- run(f"{git} checkout -b {backup_branch}", live=False)
- run(f"{git} checkout -b {updater_branch}", live=False)
- run(f"{git} reset --hard FETCH_HEAD", live=False)
- run(f"{git} reset --hard {latest_release_hash}", desc=f'[Updater] Checking out {latest_release_tag}...', live=False)
- run(f"{git} checkout {current_branch}", live=False)
-
- try:
- run(f"{git} merge --no-edit {updater_branch} -q", desc=f"[Updater] Trying to apply latest update on version {latest_release_tag}...")
- except Exception:
- logging.error(f"Update failed in merging")
- try:
- run(f"{git} merge --abort", desc="[Updater] Conflict detected, canceling update...")
- run(f"{git} reset --hard {backup_branch}", live=False)
- run(f"{git} branch -D -f {updater_branch}", live=False)
- run(f"{git} branch -D -f {backup_branch}", live=False)
- run(f"{git} stash pop", live=False) if need_stash else None
- logging.error(f"Update failed, but your file was safely reset to the state before the update.")
- return "failed"
- except Exception as e:
- logging.error(f"!!!Update failed in resetting, try to reset your files manually. {e}")
- return "failed"
-
- if need_stash:
- try:
- run(f"{git} stash apply", desc="[Updater] Trying to restore your local modifications...", live=False)
- except Exception:
- run(f"{git} reset --hard {backup_branch}", desc="[Updater] Conflict detected, canceling update...", live=False)
- run(f"{git} branch -D -f {updater_branch}", live=False)
- run(f"{git} branch -D -f {backup_branch}", live=False)
- run(f"{git} stash pop", live=False)
- logging.error(f"Update failed in applying your local changes, but your file was safely reset to the state before the update.")
- return "failed"
- run(f"{git} stash drop", live=False)
-
- run(f"{git} branch -D -f {updater_branch}", live=False)
- run(f"{git} branch -D -f {backup_branch}", live=False)
- except Exception as e:
- logging.error(f"Update failed: {e}")
- return "failed"
- if need_pip:
- try:
- run_pip(f"install -r requirements.txt", pref="[Updater]", desc="requirements", live=False)
- except Exception:
- logging.error(f"Update failed in pip install")
- return "failed"
- return "success"
- except Exception as e:
- logging.error(f"Update failed: {e}")
- return "failed"
diff --git a/spaces/JosephusCheung/ACertainsStrategyTalk/3.html b/spaces/JosephusCheung/ACertainsStrategyTalk/3.html
deleted file mode 100644
index 7bf411b7f4a6d857dca181705802d2b3f596de41..0000000000000000000000000000000000000000
--- a/spaces/JosephusCheung/ACertainsStrategyTalk/3.html
+++ /dev/null
@@ -1,145 +0,0 @@
-Workflow
-Certains Certains Certains
-* without prior
-preservation
-pretrained
-SD model
-first time
-overfitted
-Certain
-Base
-community
-models
-dreambooth
-dreambooth
-*booru
-auto-generated
-single prompt
-Certain
-Model
-Certainty
-Certain
-Thing
-LoRA
-& native fine-tuning*
-LoRA
-dreambooth
-Anythingv3
-dreambooth
-auto-generated
-single prompt
- The model is licensed with a CreativeML Open RAIL-M license. The authors claim no rights over the outputs you generate; you are free to use them, and you are accountable for their use, which must not go against the provisions set in this license. The license forbids you from sharing any content that violates any laws, produces harm to a person, disseminates personal information meant for harm, spreads misinformation, or targets vulnerable groups. For the full list of restrictions please read the license
- """
- )
-
- # image_blocks.launch(share=True, max_threads=1).queue()
- image_blocks.launch()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/Kevin676/Clone-Your-Voice/vocoder/train.py b/spaces/Kevin676/Clone-Your-Voice/vocoder/train.py
deleted file mode 100644
index f3187eb72ea7e6ab58ee665c0423c057bdb6f974..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Clone-Your-Voice/vocoder/train.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import time
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-from torch import optim
-from torch.utils.data import DataLoader
-
-import vocoder.hparams as hp
-from vocoder.display import stream, simple_table
-from vocoder.distribution import discretized_mix_logistic_loss
-from vocoder.gen_wavernn import gen_testset
-from vocoder.models.fatchord_version import WaveRNN
-from vocoder.vocoder_dataset import VocoderDataset, collate_vocoder
-
-
-def train(run_id: str, syn_dir: Path, voc_dir: Path, models_dir: Path, ground_truth: bool, save_every: int,
- backup_every: int, force_restart: bool):
- # Check to make sure the hop length is correctly factorised
- assert np.cumprod(hp.voc_upsample_factors)[-1] == hp.hop_length
-
- # Instantiate the model
- print("Initializing the model...")
- model = WaveRNN(
- rnn_dims=hp.voc_rnn_dims,
- fc_dims=hp.voc_fc_dims,
- bits=hp.bits,
- pad=hp.voc_pad,
- upsample_factors=hp.voc_upsample_factors,
- feat_dims=hp.num_mels,
- compute_dims=hp.voc_compute_dims,
- res_out_dims=hp.voc_res_out_dims,
- res_blocks=hp.voc_res_blocks,
- hop_length=hp.hop_length,
- sample_rate=hp.sample_rate,
- mode=hp.voc_mode
- )
-
- if torch.cuda.is_available():
- model = model.cuda()
-
- # Initialize the optimizer
- optimizer = optim.Adam(model.parameters())
- for p in optimizer.param_groups:
- p["lr"] = hp.voc_lr
- loss_func = F.cross_entropy if model.mode == "RAW" else discretized_mix_logistic_loss
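-    # RAW mode emits logits over 2**bits amplitude classes and is scored with
-    # cross-entropy; MOL mode emits discretized mixture-of-logistics parameters
-    # scored against float targets (hence the y/y_hat reshaping further down)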
-
- # Load the weights
- model_dir = models_dir / run_id
- model_dir.mkdir(exist_ok=True)
- weights_fpath = model_dir / "vocoder.pt"
- if force_restart or not weights_fpath.exists():
- print("\nStarting the training of WaveRNN from scratch\n")
- model.save(weights_fpath, optimizer)
- else:
- print("\nLoading weights at %s" % weights_fpath)
- model.load(weights_fpath, optimizer)
- print("WaveRNN weights loaded from step %d" % model.step)
-
- # Initialize the dataset
- metadata_fpath = syn_dir.joinpath("train.txt") if ground_truth else \
- voc_dir.joinpath("synthesized.txt")
- mel_dir = syn_dir.joinpath("mels") if ground_truth else voc_dir.joinpath("mels_gta")
- wav_dir = syn_dir.joinpath("audio")
- dataset = VocoderDataset(metadata_fpath, mel_dir, wav_dir)
- test_loader = DataLoader(dataset, batch_size=1, shuffle=True)
-
- # Begin the training
- simple_table([('Batch size', hp.voc_batch_size),
- ('LR', hp.voc_lr),
- ('Sequence Len', hp.voc_seq_len)])
-
- for epoch in range(1, 350):
- data_loader = DataLoader(dataset, hp.voc_batch_size, shuffle=True, num_workers=2, collate_fn=collate_vocoder)
- start = time.time()
- running_loss = 0.
-
- for i, (x, y, m) in enumerate(data_loader, 1):
- if torch.cuda.is_available():
- x, m, y = x.cuda(), m.cuda(), y.cuda()
-
- # Forward pass
- y_hat = model(x, m)
- if model.mode == 'RAW':
- y_hat = y_hat.transpose(1, 2).unsqueeze(-1)
- elif model.mode == 'MOL':
- y = y.float()
- y = y.unsqueeze(-1)
-
- # Backward pass
- loss = loss_func(y_hat, y)
- optimizer.zero_grad()
- loss.backward()
- optimizer.step()
-
- running_loss += loss.item()
- speed = i / (time.time() - start)
- avg_loss = running_loss / i
-
- step = model.get_step()
- k = step // 1000
-
-            if backup_every != 0 and step % backup_every == 0:
- model.checkpoint(model_dir, optimizer)
-
-            if save_every != 0 and step % save_every == 0:
- model.save(weights_fpath, optimizer)
-
- msg = f"| Epoch: {epoch} ({i}/{len(data_loader)}) | " \
- f"Loss: {avg_loss:.4f} | {speed:.1f} " \
- f"steps/s | Step: {k}k | "
- stream(msg)
-
-
- gen_testset(model, test_loader, hp.voc_gen_at_checkpoint, hp.voc_gen_batched,
- hp.voc_target, hp.voc_overlap, model_dir)
- print("")
diff --git a/spaces/KwabsHug/Language-Learn-Idea/app.py b/spaces/KwabsHug/Language-Learn-Idea/app.py
deleted file mode 100644
index 1e37d52d0d97af6f494fd55dc6ed51ac78edf144..0000000000000000000000000000000000000000
--- a/spaces/KwabsHug/Language-Learn-Idea/app.py
+++ /dev/null
@@ -1,1685 +0,0 @@
-from googletrans import Translator
-import spacy
-import gradio as gr
-import nltk
-from nltk.corpus import wordnet
-import wikipedia
-import re
-import time
-import random
-import os
-import zipfile
-import ffmpeg
-from gtts import gTTS
-#from io import BytesIO
-from collections import Counter
-from PIL import Image, ImageDraw, ImageFont
-import numpy as np
-from docx import Document
-import textwrap
-import pandas as pd
-import pykakasi
-import hangul_romanize
-import pinyin
-from langdetect import detect
-import datetime
-import cv2
-import math
-
-#When I forgot about the readme file ChatGPT suggested these - leaving them here as a reminder that the README.md must also be updated
-#print(gr.__version__)
-#import subprocess
-#subprocess.run(["pip", "install", "--upgrade", "gradio==3.47.1"]) #For huggingface as they sometimes install specific versions on container build
-
-#Uncomment these for Huggingface
-nltk.download('maxent_ne_chunker') #Chunker
-nltk.download('stopwords') #Stop Words List (Mainly Roman Languages)
-nltk.download('words') #200 000+ Alphabetical order list
-nltk.download('punkt') #Tokenizer
-nltk.download('verbnet') #For Description of Verbs
-nltk.download('omw')
-nltk.download('omw-1.4') #Multilingual Wordnet
-nltk.download('wordnet') #For Definitions, Antonyms and Synonyms
-nltk.download('shakespeare')
-nltk.download('dolch') #Sight words
-nltk.download('names') #People Names NER
-nltk.download('gazetteers') #Location NER
-nltk.download('opinion_lexicon') #Sentiment words
-nltk.download('averaged_perceptron_tagger') #Parts of Speech Tagging
-nltk.download('udhr') # Declaration of Human rights in many languages
-
-spacy.cli.download("en_core_web_sm")
-spacy.cli.download('ko_core_news_sm')
-spacy.cli.download('ja_core_news_sm')
-spacy.cli.download('zh_core_web_sm')
-spacy.cli.download("es_core_news_sm")
-spacy.cli.download("de_core_news_sm")
-
-nlp_en = spacy.load("en_core_web_sm")
-nlp_de = spacy.load("de_core_news_sm")
-nlp_es = spacy.load("es_core_news_sm")
-nlp_ko = spacy.load("ko_core_news_sm")
-nlp_ja = spacy.load("ja_core_news_sm")
-nlp_zh = spacy.load("zh_core_web_sm")
-
-nlp = spacy.load('en_core_web_sm')
-translator = Translator()
-
-def Sentencechunker(sentence):
- Sentchunks = sentence.split(" ")
- chunks = []
- for i in range(len(Sentchunks)):
- chunks.append(" ".join(Sentchunks[:i+1]))
- return " | ".join(chunks)
-
-def ReverseSentenceChunker(sentence):
- reversed_sentence = " ".join(reversed(sentence.split()))
- chunks = Sentencechunker(reversed_sentence)
- return chunks
-
-def three_words_chunk(sentence):
- words = sentence.split()
- chunks = [words[i:i+3] for i in range(len(words)-2)]
- chunks = [" ".join(chunk) for chunk in chunks]
- return " | ".join(chunks)
-
-def keep_nouns_verbs(sentence):
- doc = nlp(sentence)
- nouns_verbs = []
- for token in doc:
- if token.pos_ in ['NOUN','VERB','PUNCT']:
- nouns_verbs.append(token.text)
- return " ".join(nouns_verbs)
-
-def unique_word_count(text="", state=None):
- if state is None:
- state = {}
- words = text.split()
- word_counts = state
- for word in words:
- if word in word_counts:
- word_counts[word] += 1
- else:
- word_counts[word] = 1
- sorted_word_counts = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)
-    return sorted_word_counts, word_counts  # second value feeds back in as gradio state
-
-def Wordchunker(word):
- chunks = []
- for i in range(len(word)):
- chunks.append(word[:i+1])
- return chunks
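-# e.g. (illustrative) Wordchunker("cat") -> ["c", "ca", "cat"]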
-
-def BatchWordChunk(sentence):
- words = sentence.split(" ")
- FinalOutput = ""
- Currentchunks = ""
- ChunksasString = ""
- for word in words:
- ChunksasString = ""
- Currentchunks = Wordchunker(word)
- for chunk in Currentchunks:
- ChunksasString += chunk + " "
- FinalOutput += "\n" + ChunksasString
- return FinalOutput
-
-# Translate from English to French
-
-langdest = gr.Dropdown(choices=["af", "de", "es", "ko", "ja", "zh-cn"], label="Choose Language", value="de")
-
-ChunkModeDrop = gr.Dropdown(choices=["Chunks", "Reverse", "Three Word Chunks", "Spelling Chunks"], label="Choose Chunk Type", value="Chunks")
-
-def FrontRevSentChunk (Chunkmode, Translate, Text, langdest):
- FinalOutput = ""
- TransFinalOutput = ""
- if Chunkmode=="Chunks":
- FinalOutput += Sentencechunker(Text)
- if Chunkmode=="Reverse":
- FinalOutput += ReverseSentenceChunker(Text)
- if Chunkmode=="Three Word Chunks":
- FinalOutput += three_words_chunk(Text)
- if Chunkmode=="Spelling Chunks":
- FinalOutput += BatchWordChunk(Text)
-
- if Translate:
- TransFinalOutput = FinalOutput
- translated = translator.translate(TransFinalOutput, dest=langdest)
- FinalOutput += "\n" + translated.text
- return FinalOutput
-
-# Define a function to filter out non-verb, noun, or adjective words
-def filter_words(words):
- # Use NLTK to tag each word with its part of speech
- tagged_words = nltk.pos_tag(words)
-
- # Define a set of parts of speech to keep (verbs, nouns, adjectives)
- keep_pos = {'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS'}
-
- # Filter the list to only include words with the desired parts of speech
- filtered_words = [word for word, pos in tagged_words if pos in keep_pos]
-
- return filtered_words
-
-def SepHypandSynExpansion(text):
- # Tokenize the text
- tokens = nltk.word_tokenize(text)
- NoHits = ""
- FinalOutput = ""
-
- # Find synonyms and hypernyms of each word in the text
- for token in tokens:
- synonyms = []
- hypernyms = []
- for synset in wordnet.synsets(token):
- synonyms += synset.lemma_names()
- hypernyms += [hypernym.name() for hypernym in synset.hypernyms()]
- if not synonyms and not hypernyms:
- NoHits += f"{token} | "
- else:
- FinalOutput += "\n" f"{token}: hypernyms={hypernyms}, synonyms={synonyms} \n"
- NoHits = set(NoHits.split(" | "))
- NoHits = filter_words(NoHits)
- NoHits = "Words to pay special attention to: \n" + str(NoHits)
- return NoHits, FinalOutput
-
-
-def WikiSearch(term):
-    termtoks = term.split(" ")
-    results = []
-
-    for item in termtoks:
-        # Search for each token on Wikipedia and collect the top results
-        # (the original returned inside the loop, so only the first token was searched)
-        results += wikipedia.search(item, results=20)
-    return results
-
-def create_dictionary(word_list, word_dict=None):
-    if word_dict is None:  # avoid the shared-mutable-default-argument pitfall
-        word_dict = {}
-    word_list = set(word_list.split(" "))
-    for word in word_list:
-        key = word[:2]
-        if key not in word_dict:
-            word_dict[key] = [word]
-        else:
-            word_dict[key].append(word)
-    return word_dict
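-# e.g. (illustrative) create_dictionary("apple apricot banana")
-#      -> {'ap': ['apple', 'apricot'], 'ba': ['banana']} (set order may vary)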
-
-def merge_lines(roman_file, w4w_file, full_mean_file, macaronic_file):
- files = [roman_file, w4w_file, full_mean_file, macaronic_file]
- merged_lines = []
-
- with open(roman_file.name, "r") as f1, open(w4w_file.name, "r") as f2, \
- open(full_mean_file.name, "r") as f3, open(macaronic_file.name, "r") as f4:
- for lines in zip(f1, f2, f3, f4):
- merged_line = "\n".join(line.strip() for line in lines)
- merged_lines.append(merged_line)
-
- return "\n".join(merged_lines)
-
-TTSLangOptions = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
-TTSLangOptions2 = gr.Dropdown(choices=["en", "de", "es", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt/text accent")
-
-def TTSforListeningPractice(text, language = "en", Repeat10x = False):
- if Repeat10x:
- text = text * 10
-    speech = gTTS(text=text, lang=language, slow=False)  # gTTS expects a bool; the string "False" is truthy
- speech.save("CurrentTTSFile.mp3")
- #file = BytesIO()
- #speech.write_to_fp(file)
- #file.seek(0)
- return "CurrentTTSFile.mp3" #file
-
-def AutoChorusInvestigator(sentences):
- sentences = sentences.splitlines()
- # Use Counter to count the number of occurrences of each sentence
- sentence_counts = Counter(sentences)
-
- # Identify duplicate sentences
- duplicates = [s for s, count in sentence_counts.items() if count > 1]
-
- FinalOutput = ""
- if len(duplicates) == 0:
- FinalOutput += "No duplicate sentences found in the file."
- else:
- FinalOutput += "The following sentences appear more than once in the file:"
- for sentence in duplicates:
- FinalOutput += "\n" + sentence
- return FinalOutput
-
-def AutoChorusPerWordScheduler(sentences):
- words = set(sentences.split(" "))
- wordsoneattime =[]
- practicestring = ""
-
- FinalOutput = "This is supposed to output the words in repetition format (i.e. schedule for repitition) \nCurrent Idea = 1 new word every min and 1 old word every second" + "\n\nWords: \n"
- for word in words:
- wordsoneattime.append(word)
- for i in range(0, 59):
- practicestring += word + " "
- practicestring += random.choice(wordsoneattime) + " "
- FinalOutput += word + "\n "
- practicestring += "\n"
-
- FinalOutput += practicestring
- return FinalOutput
-
-def group_words(inlist):
- inlisttoks = inlist.split(" ")
- inlistset = set(inlisttoks)
-
- word_groups = []
- current_group = []
-
- for word in inlisttoks:
- current_group.append(word)
- if len(current_group) == 10:
- word_groups.append(current_group)
- current_group = []
- if current_group:
- word_groups.append(current_group)
-
- current_group_index = 0
- current_group_time = 0
-
- while True:
- if current_group_time == 60:
- current_group_index = (current_group_index + 1) % len(word_groups)
- current_group_time = 0
- else:
- if current_group_time % 10 == 0:
- random.shuffle(word_groups[current_group_index])
- current_group_time += 10
-
- yield " ".join(word_groups[current_group_index])
- time.sleep(10)
-
-def split_verbs_nouns(text):
- nlp = spacy.load("en_core_web_sm")
- doc = nlp(text)
-
- verbs_nouns = []
- verbs_nouns_str = ""
- other_words = []
- other_words_str = ""
- pos_string = []
-
- for token in doc:
- if token.pos_ in ["VERB", "NOUN"]:
- verbs_nouns_str += token.text + " || "
- other_words_str += "__ "
- #verbs_nouns.append(token.text)
- #elif token.text in [punct.text for punct in doc if punct.is_punct]:
- # verbs_nouns.append(token.text)
- # other_words.append(token.text)
- else:
- other_words_str += token.text + " || "
- #other_words.append(token.text)
- #pos_string.append(token.pos_)
-
- verbs_nouns_text = verbs_nouns_str #" ".join(verbs_nouns)
- other_words_text = other_words_str #" ".join(other_words)
- pos_string_text = "Debug Test" #" ".join(pos_string)
-
- return other_words_text, pos_string_text, verbs_nouns_text
-
-SRTLangOptions = gr.Dropdown(choices=["en", "ja", "ko", "zh-cn"], value="en", label="choose the language of the srt")
-
-def save_string_to_file(string_to_save, file_name, srtdocx):
- with open(file_name, 'w', encoding='utf-8') as file:
- file.write(string_to_save)
- if srtdocx == "True":
- with open(file_name.split('.')[0] + '.srt', 'w', encoding='utf-8') as file:
- file.write(string_to_save)
- srtdocument = Document()
- srtdocument.add_paragraph(string_to_save)
- srtdocument.save('SplitSRT.docx')
-
-def split_srt_file(text, lang): #file_path):
- # Open the SRT file and read its contents
- #with open(file_path, 'r') as f:
- # srt_contents = f.read()
-
- if lang == "en": nlp = spacy.load('en_core_web_sm')
- if lang == "ja": nlp = spacy.load('ja_core_news_sm')
- if lang == "ko": nlp = spacy.load('ko_core_news_sm')
- if lang == "zn-cn": nlp = spacy.load('zn_core_web_sm')
-
- srt_contents = text
-
- # Split the SRT file by timestamp
- srt_sections = srt_contents.split('\n\n')
- srt_sections_POSversion = []
- subaswordlist = ""
-
- # Loop through each section of the SRT file
- for i in range(len(srt_sections)):
- # Split the section into its timestamp and subtitle text
- section_lines = srt_sections[i].split('\n')
- timestamp = section_lines[1]
- subtitle_text = ' | '.join(section_lines[2:])
- sub_split_line = nlp(subtitle_text)
- subtitle_textPOSversion = ""
- subtitle_text = ""
-
- # Replace spaces in the subtitle text with " | "
- #subtitle_text = subtitle_text.replace(' ', ' | ')
- for token in sub_split_line:
- subtitle_text += token.text + " | "
- subaswordlist += token.text + " "
- subtitle_textPOSversion += token.pos_ + " | "
-
- # Reconstruct the section with the updated subtitle text
- srt_sections[i] = f"{section_lines[0]}\n{timestamp}\n{subtitle_text[3:]}"
- srt_sections_POSversion.append(f"{section_lines[0]}\n{timestamp}\n{subtitle_textPOSversion[3:]}\n\n")
-
- SplitSRT = '\n\n'.join(srt_sections)
- SplitPOSsrt = ''.join(srt_sections_POSversion)
- save_string_to_file(SplitSRT, "SplitSRT.txt", "True")
- save_string_to_file(SplitPOSsrt, "SplitPOSsrt.txt", "False")
- subaswordlist = set(subaswordlist.split(" "))
- subaswordlistOutput = ""
-
- for word in subaswordlist:
- subaswordlistOutput += "\n | " + word
-
- subaswordlistOutput = str(len(subaswordlist)) + "\n" + subaswordlistOutput
-
- # Join the SRT sections back together into a single string
- return subaswordlistOutput, ["SplitSRT.docx", "SplitSRT.txt", "SplitSRT.srt", "SplitPOSsrt.txt"], SplitSRT, SplitPOSsrt
-
-def find_string_positions(s, string):
- positions = []
- start = 0
- while True:
- position = s.find(string, start)
- if position == -1:
- break
- positions.append(position)
- start = position + len(string)
- return positions
-
-def splittext(string):
- string_no_formaterror = string.replace(" -- > ", " --> ")
- split_positions = find_string_positions(string_no_formaterror, " --> ")
- split_strings = []
- prepos = 0
- for pos in split_positions:
- pos -= 12
-        split_strings.append(string_no_formaterror[prepos:pos])  # slice the corrected string, since the positions were found in it
- prepos = pos
-
- FinalOutput = ""
- stoutput = ""
- linenumber = 1
- #print(linenumber)
- for item in split_strings[1:]:
- stoutput = item[0:29] + "\n" + item[30:]
- stspaces = find_string_positions(stoutput, " ")
- FinalOutput += str(linenumber) + "\n" + stoutput[:stspaces[-2]] + "\n"
- FinalOutput += "\n"
- linenumber += 1
- return FinalOutput[2:]
-
-def VideotoSegment(video_file, subtitle_file):
- # Read the subtitle file and extract the timings for each subtitle
- timings = []
- for line in subtitle_file:
- if '-->' in line:
- start, end = line.split('-->')
- start_time = start.strip().replace(',', '.')
- end_time = end.strip().replace(',', '.')
- timings.append((start_time, end_time))
-
- # Cut the video into segments based on the subtitle timings
- video_segments = []
- for i, (start_time, end_time) in enumerate(timings):
- output_file = f'segment_{i}.mp4'
- ffmpeg.input(video_file, ss=start_time, to=end_time).output(output_file, codec='copy').run()
- video_segments.append(output_file)
-
- # Convert each segment to an MP3 audio file using FFmpeg
- audio_segments = []
- for i in range(len(timings)):
- output_file = f'segment_{i}.mp3'
- ffmpeg.input(video_segments[i]).output(output_file, codec='libmp3lame', qscale='4').run()
- audio_segments.append(output_file)
-
- # Create a ZIP archive containing all of the segmented files
- zip_file = zipfile.ZipFile('segmented_files.zip', 'w')
- for segment in video_segments + audio_segments:
- zip_file.write(segment)
- os.remove(segment)
- zip_file.close()
-
- # Return the ZIP archive for download
- return 'segmented_files.zip'
-
-def text_to_dropdown(text, id=None): #TextCompFormat
-    lines = text.strip().split("\n")
-    # the original markup was lost in extraction; rebuilt here as a plain <select>
-    html = f'<select id="{id}">\n' if id else "<select>\n"
-    for line in lines:
-        html += f'  <option value="{line}">{line}</option>\n'
-    html += "</select>\n"
-    return html
-
-def text_to_links(text): #TextCompFormat
-    lines = text.strip().split("\n")
-    html = ""
-    for line in lines:
-        if line.startswith("http"):
-            # the original anchor markup was lost in extraction; rebuilt plainly
-            html += f'<a href="{line}">{line}</a> | '
-        else:
-            html += line + " - Not a link \n"
-    return html
-
-HTMLCompMode = gr.Dropdown(choices=["Dropdown", "Links"], value="Links")
-
-def TextCompFormat(text, HTMLCompMode):
- FinalOutput = ""
- if HTMLCompMode == "Dropdown":
- FinalOutput = text_to_dropdown(text)
- if HTMLCompMode == "Links":
- FinalOutput = text_to_links(text)
- return FinalOutput
-
-def create_collapsiblebutton(button_id, button_caption, div_content):
-    # the original markup was lost in extraction; rebuilt as a minimal toggle
-    # (the "{button_id}Div" id scheme is a reconstruction, not the original)
-    button_html = (f'<button id="{button_id}" type="button" '
-                   f"onclick=\"var d=document.getElementById('{button_id}Div'); d.hidden=!d.hidden\">"
-                   f'{button_caption}</button>')
-    div_html = f'<div id="{button_id}Div" hidden>\n{div_content}\n</div>'
-    return button_html + "\n " + div_html
-
-#---------------
-
-def removeTonalMarks(string):
-    tonalMarks = "āēīōūǖáéíóúǘǎěǐǒǔǚàèìòùǜɔɛ"
-    nonTonalMarks = "aeiouuaeiouuaeiouuaeiouuoe"  # same length as tonalMarks; the original was one 'u' short, so ǜ/ɔ mapped wrongly and ɛ overflowed
- noTonalMarksStr = ""
- for char in string:
- index = tonalMarks.find(char)
- if index != -1:
- noTonalMarksStr += nonTonalMarks[index]
- else:
- noTonalMarksStr += char
- return noTonalMarksStr
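-# e.g. (illustrative) removeTonalMarks("nǐ hǎo") -> "ni hao"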
-
-
-def add_text_to_image(input_image, text, output_image_path="output.png", border_size=2):
- text = removeTonalMarks(text)
- imagearr = np.asarray(input_image) #Image.open(input_image_path)
-    height, width = imagearr.shape[:2]  # numpy arrays are (rows, cols), i.e. (height, width)
- img = Image.fromarray(imagearr)
- draw = ImageDraw.Draw(img)
- font = ImageFont.truetype("ShortBaby.ttf", 36) #ShortBaby-Mg2w.ttf
- text_width, text_height = draw.textbbox((0, 0), text, font=font)[2:] #draw.textsize(text, font)
- # calculate the x, y coordinates of the text box
- x = (width - text_width) / 2
- y = (height - text_height) / 2
- # put the text on the image with a border
- for dx, dy in [(0, 0), (border_size, border_size), (-border_size, -border_size), (border_size, -border_size), (-border_size, border_size)]:
- draw.text((x + dx, y + dy), text, font=font, fill=(255, 255, 255))
- draw.text((x, y), text, font=font, fill=(0, 0, 0))
- img.save(output_image_path, "PNG")
- return "output.png"
-
-def UnknownTrackTexttoApp(text): #Copy of def OptimisedTtAppForUNWFWO(text):
- #Buttons and labels autocreation
- #Change this to spacy version so that data is from one library
- #Javascript videos on youtube - KodeBase - Change button color Onclick; bro code - button in 5 minutes
- #GPT3 helped guide the highlighting if statements
-
- FinalOutput = ""
- #sentence = "One Piece chapter 1049 spoilers Thanks to Etenboby from WG forums Chapter 1049: **\"The world we should aspire to\"** * In the cover, someone burned Niji and Yonji\u2019s book * Kaido flashback time. We see his childhood in Vodka Kingdom, and where a few years later he met Whitebeard who told him that Rocks wants to meet him * In the present, part of Raizo\u2019s water leaves the castle and flame clouds disappear. But Momo makes a new one. * Luffy says he will create a world where none of his friends would starve, then he hits Kaido and Kaido falls to the ground of the flower capital. * In another flashback, Kaido tells King that Joy Boy will be the man that can defeat him. **Additional info** *Flashback to Kaidou as a kid* *- His country tries to sell him to the marines but he escapes* *- He rampages in Hachinosu(i think it's blackbeard's island) and Rocks invites him to his crew* *- Young WB appears* *- Rocks flashback suddenly ends* *- Higurashi invites Kaidou* *- The flashback ends with Kaidou telling King he knows who Joy Boy is.* *Back to the present* \\- *Denjirou hugs Hiyori* \\- *Luffy's punch hits Kaidou* *Flashback continues* \\- *King asks: Who is it then?* \\- *Kaidou: The one who will defeat me* \\- *King: Then he will not appear* \\- *Onigashima falls near the capital* \\- *Momo falls* **BREAK NEXT WEEK** https://www.reddit.com/r/OnePiece/comments/umu2h0/one_piece_chapter_1049_spoilers/" #@param {type: "string"}
- HTMLMainbody = ""
- GradHTMLMainbody = "" #HTML in gradio components doesnt do css and js properly so nned to highlight
-
- doc = nlp(text)
- iIDNumber = 0
- iVerbCount = 0
- iNounCount = 0
- iWords = 0
- allverbs = ""
- allverbslist = ""
- allverbids = ""
- allverbidslist = ""
-
- for token in doc:
- if (token.pos_ == "VERB") or (token.pos_ == "AUX"):
- HTMLMainbody = HTMLMainbody + " "
- GradHTMLMainbody = GradHTMLMainbody + " "
- allverbids = allverbids + str(iVerbCount) + " "
- iVerbCount += 1
- iWords += 1
- allverbs = allverbs + token.text + " "
- elif token.pos_ == "NOUN":
- HTMLMainbody = HTMLMainbody + ""
- GradHTMLMainbody = GradHTMLMainbody + ""
- iNounCount += 1
- iWords += 1
- elif token.pos_ == "PUNCT":
- HTMLMainbody = HTMLMainbody + token.text
- GradHTMLMainbody = GradHTMLMainbody + token.text
- else:
- HTMLMainbody = HTMLMainbody + token.text + " "
- GradHTMLMainbody = GradHTMLMainbody + token.text + " "
- iWords += 1
- iIDNumber += 1
-
- allverbslist = allverbs.split()
- allverbidslist = allverbids.split()
-
- FinalHTML = ""
- FinalGradHTML = ""
- FinalCSS = ""
- FinalJS = ""
-
- FinalCSS = FinalCSS + '''
- '''
-
- #style='background-color:Gainsboro; There is no general style attribute for buttons but you can make a class and put the style conditions
-
- iSents = 0
- for sent in doc.sents:
- iSents += 1
-
- FinalHTML = FinalHTML + "\n
- \n
- '''
-
- FinalOutput = FinalHTML + FinalCSS + FinalJS
- FinalGradOutput = FinalGradHTML + FinalCSS + FinalJS
-
-
-    HTMLDownloadTemp = 'UnknownVerbTrack.html'
-
- with open(HTMLDownloadTemp, 'w') as f:
- f.write(FinalOutput)
-
- return HTMLDownloadTemp, FinalGradOutput, FinalOutput
-
-#Kathryn Lingel - Pyambic Pentameter Example - PyCon US
-#Basic Language Model Code
-def build_model(source_text):
- list_of_words = source_text.split()
- model = {} #initialise model to empty dictionary
-
- for i, word in enumerate(list_of_words[:-1]): #every word except last word
- if not word in model: #If word not already in dictionary as a key we add it and initialise to empty array
- model[word] = []
- next_word = list_of_words[i+1]
- model[word].append(next_word) #model = dictionary per word containing previously seen next words from ANY given text ==> even lyrics
-
- translatestring = str(model)
- translatestring = translatestring.replace("'", "")
- return model, translatestring
-
-def markov_generate(source_text, num_words = 20):
- model = build_model(source_text)
- seed = random.choice(list(model.keys())) #Randomly pick a word ==> Heading of the dictionary are keys aka the words
- output = [seed] #output initialisation using random word
- for i in range(num_words):
- last_word = output[-1] #of the output list
- next_word = random.choice(model[last_word]) # next word to the above word
- output.append(next_word) #new last word in the output list
- if next_word not in model:
- break
-
- return ' '.join(output) #New list into a string aka (hopefully) sentence
-# print(markov_generate("I am the egg man they are the egg men I am the wallrus goo goo g' joob"))
-
-def chunk_srt_text(srt_text, chunk_size):
- # Split the SRT text into chunks of the specified size
- ChunkList = textwrap.wrap(srt_text, chunk_size)
- dfFinalOutput = pd.DataFrame(ChunkList, columns = [f"Chunks - { len(ChunkList) }"])
- return dfFinalOutput, ""
-
-#-------------------------------------------------------------------------------------------------------------------------------
-#Clean Merge
-
-def split_into_fours(text):
- lines = text.split('\n')
- chunks = [lines[i:i+4] for i in range(0, len(lines), 4)]
- return chunks
-
-def NumberLineSort(listlen):
-    numbers = list(range(0, listlen)) # create a list of numbers 0 to listlen-1
- grouped_numbers = []
- for i in range(4):
- group = [numbers[j] for j in range(i, len(numbers), 4)]
- grouped_numbers.append(group)
- return grouped_numbers
-
-def SRTLineSort(text):
- chunks = split_into_fours(text)
- NumberofBlocks = len(chunks) / 4
- printnumber = NumberLineSort(len(chunks))
- SRTLinenumber = []
- SRTTiming = []
- SRTContent = []
- FinalOutput = ""
-
- for i in range(0, 3):
- for item in printnumber[i]:
- if i == 0: SRTLinenumber.append(chunks[item][0])
- if i == 1: SRTTiming.append(chunks[item][0])
- if i == 2: SRTContent.append(chunks[item])
-
- for i in range(0, int(NumberofBlocks)):
- FinalOutput += SRTLinenumber[i] + "\n"
- FinalOutput += SRTTiming[i] + "\n"
- for i2 in range(0, 4):
- FinalOutput += SRTContent[i][i2] + "\n"
- FinalOutput += "\n"
-
- return FinalOutput
-
-#--------------------------------------------------------------------------------------------------------------------------------
-
-RandomiseTextType = gr.Dropdown(choices=["Words", "Words5x", "Sentences", "Paragraph", "Page"], value="Words")
-
-def RandomiseTextbyType(Text, Choice):
- FinalOutput = ""
- TempWords = []
-
- if Choice == "Words" :
- TempWords = Text.split()
- FinalOutput = reading_randomize_words(TempWords)
- if Choice == "Words5x" :
- TempWords = Text.split()
- FinalOutput = reading_randomize_words5x(TempWords)
- if Choice == "Sentences" : FinalOutput = reading_randomize_words_in_sentence(Text)
- if Choice == "Paragraph" : FinalOutput = reading_randomize_words_in_paragraph(Text)
- if Choice == "Page" : FinalOutput = "Still under Construction"
-
- return FinalOutput
-
-def reading_randomize_words5x(word):
- wordScram = ""
- for item in word:
- for i in range(5):
- item = ''.join(random.sample(item, len(item)))
- wordScram += " " + item
- #print(item)
- wordScram += "\n"
- return wordScram
-
-def reading_randomize_words(word):
- wordScram = ""
- for item in word:
- item = ''.join(random.sample(item, len(item)))
- wordScram += item + " "
- return wordScram
-
-def reading_randomize_words_in_sentence(text):
- FinalOutput = ""
- sentences = text.split(".")
- for sentence in sentences:
- words = sentence.split()
- random.shuffle(words)
- FinalOutput += ' '.join(words) + ". "
- return FinalOutput
-
-def reading_randomize_words_in_paragraph(paragraph):
- sentences = paragraph.split(".")
- random.shuffle(sentences)
- return '. '.join(sentences)
-
-def changeexposuretext(text):
- return f""
-
-#-------------------------------------------------------------------------------------------------------------------------------
-
-def ImageTranslationTest(video, subtitle):
- #Inputs from file Returns a so the path is item.name
-
- if subtitle is None:
- return video.name
-
- return [video.name, subtitle.name]
-
-#------------------------------------------------------------------------------------------------------------------------------
-
-def AutoSyllablePractice(String):
- FinalOutput = ""
-
- stringlen = len(String)
-
- vowels =["a", "e", "i", "o", "y"]
- VowelSyllables = []
- allvowels = ""
-
- for i in vowels:
- if i in String:
- allvowels = allvowels + " " + String.replace(i, i + " ")
- allvowels = allvowels + " " + String.replace(i, " " + i)
- VowelSyllables = allvowels.split(" ")
-
- VowelSyllablesstr = ""
-
- for item in VowelSyllables:
- VowelSyllablesstr += item + ", "
-
- FinalOutput += VowelSyllablesstr
- return FinalOutput
-
-def GuidedReading(textspreprocess,seperator):
- FinalOutput = ""
-
- if seperator == "Sentences":
- textspreprocess = textspreprocess.split(".")
- FinalOutput = ""
- elif seperator == "lines":
- textspreprocess = textspreprocess.splitlines()
- else: textspreprocess = textspreprocess.split(seperator)
-
- # Load language-specific models
- nlp_en = spacy.load("en_core_web_sm")
- nlp_de = spacy.load("de_core_news_sm")
- nlp_es = spacy.load("es_core_news_sm")
- nlp_ko = spacy.load("ko_core_news_sm")
- nlp_ja = spacy.load("ja_core_news_sm")
- nlp_zh = spacy.load("zh_core_web_sm")
-
- # Create a dictionary of language codes and models
- nlp_dict = {"en": nlp_en, "de": nlp_de, "es": nlp_es, "ko": nlp_ko, "ja": nlp_ja, "zh-cn": nlp_zh}
-
- # Define a function to POS tag and transliterate a text given its language code
- def pos_tag_and_transliterate(text, lang):
- # Get the model for the language
- nlp = nlp_dict.get(lang)
- if nlp is None:
- return None # No model found for the language
- # Process the text and get a list of (token, tag) tuples
- doc = nlp(text)
- original_pos_tags = [(token.text, token.pos_) for token in doc]
- # Use different libraries for different languages
- if lang == "ja":
- # Use pykakasi for Japanese
- from pykakasi import kakasi
-            # The modern pykakasi API needs no mode setup: kakasi().convert()
-            # already returns per-token dicts that include a 'hepburn' field
-            k = kakasi()
- #words = re.findall(r"\S+|\s+", text)
- words = [token.text for token in doc]
- # Create a dictionary that maps each original word to its transliterated form with spaces
- translit_dict = {word: k.convert(word)[0]['hepburn'] for word in words}
- # Get the transliterated text with spaces
- transliterated = " ".join(translit_dict.values())
- # Replace the words in the original POS tag list with their transliterated forms
- translit_pos_tags = [(translit_dict.get(word, word), tag) for word, tag in original_pos_tags]
- # Get the transliterated language code
- lang_translit = lang + "-translit"
- elif lang == "ko":
- # Use hangul-romanize for Korean
- from hangul_romanize import Transliter
- from hangul_romanize.rule import academic
- transliter = Transliter(academic)
- # Create a dictionary that maps each original word to its transliterated form with spaces
- words = [token.text for token in doc]
- translit_dict = {word: " ".join(transliter.translit(word)) for word in words}
- # Get the transliterated text with spaces
- transliterated = " ".join(translit_dict.values())
- # Replace the words in the original POS tag list with their transliterated forms
- translit_pos_tags = [(translit_dict.get(word, word), tag) for word, tag in original_pos_tags]
- # Get the transliterated language code
- lang_translit = lang + "-translit"
- elif lang == "zh-cn":
- # Use pinyin for Chinese
- from pinyin import get
- # Get the transliterated text without spaces
- transliterated = get(text)
- # Replace the words in the original POS tag list with their transliterated forms
- translit_pos_tags = [(get(word), tag) for word, tag in original_pos_tags]
- # Get the transliterated language code
- lang_translit = lang + "-translit"
- else:
- # No transliteration needed for other languages
- return (text, original_pos_tags, text, original_pos_tags, lang)
- # Return a tuple of the original text, the original POS tags, the transliterated text, the transliterated POS tags, and the transliterated language code
- return (text, original_pos_tags, transliterated, translit_pos_tags, lang_translit)
-
- # Create an empty list to store the results
- texts = []
-
- # Loop through each text in the list
- for text in textspreprocess:
- # Detect the language of the text
- lang = detect(text)
- # Add the text and the language as a tuple to the results list
- texts.append((text, lang))
-
- # Process each text in the texts list and print the results
- for text, lang in texts:
- result = pos_tag_and_transliterate(text, lang)
- if result is not None:
- FinalOutput += f"\nLanguage: {lang}"
- FinalOutput += f"\nText: {result[0]}"
- if lang in ["ja", "ko", "zh-cn"]:
- FinalOutput += f"\nTransliterated Text: {result[2]}"
- FinalOutput += f"\n POS tags: {result[1]}"
- if lang in ["ja", "ko", "zh-cn"]:
- FinalOutput += f"\nTPOS tags: {result[3]}"
- FinalOutput += f"\n"
-
- return FinalOutput
-
-
-def create_acronym_map(text):
- """Create an acronym map from the provided text."""
- lines = text.split('\n')
- acronym_map = {}
- allacronyms = ""
-
- for line in lines:
-        # Split by whitespace and take each word's first letter, uppercased
- words = line.split()
- acronym = ''.join([word[0].upper() for word in words if word])
- if acronym: # Avoid adding empty lines
- acronym_map[line] = acronym
- allacronyms += acronym + " | "
-
- return acronym_map, allacronyms
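-# e.g. (illustrative) create_acronym_map("for your information")
-#      -> ({'for your information': 'FYI'}, 'FYI | ')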
-
-def onlyplurals(Inputtext): #NLP or Simple Suffix check
- doc = nlp(Inputtext)
- Pluralwords = ""
- for token in doc:
- if token.tag_ == "NNS" or token.tag_ == "NNPS":
- Pluralwords = Pluralwords + token.text + " "
-    TextToks = Pluralwords.split()  # plain split() so the trailing space doesn't yield an empty token
- PluralCounts = Counter(elem for elem in TextToks)
- return Pluralwords, PluralCounts
-
-def LoadNLTKUDHRText(text):
- NLTKtext = nltk.corpus.udhr.raw(text)
- CountNLTKText = Counter(NLTKtext.split()).most_common(100)
-
- return CountNLTKText, NLTKtext
-
-NLTKudhr = gr.Dropdown(choices=['English-Latin1', 'Akuapem_Twi-UTF8', 'Zulu-Latin1', 'Afrikaans-Latin1', 'German_Deutsch-Latin1', 'Japanese_Nihongo-EUC', 'Japanese_Nihongo-SJIS', 'Japanese_Nihongo-UTF8', 'Spanish-Latin1', 'Korean_Hankuko-UTF8', 'Chinese_Mandarin-GB2312', 'Abkhaz-Cyrillic+Abkh', 'Abkhaz-UTF8', 'Achehnese-Latin1', 'Achuar-Shiwiar-Latin1', 'Adja-UTF8', 'Afaan_Oromo_Oromiffa-Latin1', 'Afrikaans-Latin1', 'Aguaruna-Latin1', 'Akuapem_Twi-UTF8', 'Albanian_Shqip-Latin1', 'Amahuaca', 'Amahuaca-Latin1', 'Amarakaeri-Latin1', 'Amuesha-Yanesha-UTF8', 'Arabela-Latin1', 'Arabic_Alarabia-Arabic', 'Asante-UTF8', 'Ashaninca-Latin1', 'Asheninca-Latin1', 'Asturian_Bable-Latin1', 'Aymara-Latin1', 'Balinese-Latin1', 'Bambara-UTF8', 'Baoule-UTF8', 'Basque_Euskara-Latin1', 'Batonu_Bariba-UTF8', 'Belorus_Belaruski-Cyrillic', 'Belorus_Belaruski-UTF8', 'Bemba-Latin1', 'Bengali-UTF8', 'Beti-UTF8', 'Bichelamar-Latin1', 'Bikol_Bicolano-Latin1', 'Bora-Latin1', 'Bosnian_Bosanski-Cyrillic', 'Bosnian_Bosanski-Latin2', 'Bosnian_Bosanski-UTF8', 'Breton-Latin1', 'Bugisnese-Latin1', 'Bulgarian_Balgarski-Cyrillic', 'Bulgarian_Balgarski-UTF8', 'Cakchiquel-Latin1', 'Campa_Pajonalino-Latin1', 'Candoshi-Shapra-Latin1', 'Caquinte-Latin1', 'Cashibo-Cacataibo-Latin1', 'Cashinahua-Latin1', 'Catalan-Latin1', 'Catalan_Catala-Latin1', 'Cebuano-Latin1', 'Chamorro-Latin1', 'Chayahuita-Latin1', 'Chechewa_Nyanja-Latin1', 'Chickasaw-Latin1', 'Chinanteco-Ajitlan-Latin1', 'Chinanteco-UTF8', 'Chinese_Mandarin-GB2312', 'Chuuk_Trukese-Latin1', 'Cokwe-Latin1', 'Corsican-Latin1', 'Croatian_Hrvatski-Latin2', 'Czech-Latin2', 'Czech-UTF8', 'Czech_Cesky-Latin2', 'Czech_Cesky-UTF8', 'Dagaare-UTF8', 'Dagbani-UTF8', 'Dangme-UTF8', 'Danish_Dansk-Latin1', 'Dendi-UTF8', 'Ditammari-UTF8', 'Dutch_Nederlands-Latin1', 'Edo-Latin1', 'English-Latin1', 'Esperanto-UTF8', 'Estonian_Eesti-Latin1', 'Ewe_Eve-UTF8', 'Fante-UTF8', 'Faroese-Latin1', 'Farsi_Persian-UTF8', 'Farsi_Persian-v2-UTF8', 'Fijian-Latin1', 'Filipino_Tagalog-Latin1', 'Finnish_Suomi-Latin1', 'Fon-UTF8', 'French_Francais-Latin1', 'Frisian-Latin1', 'Friulian_Friulano-Latin1', 'Ga-UTF8', 'Gagauz_Gagauzi-UTF8', 'Galician_Galego-Latin1', 'Garifuna_Garifuna-Latin1', 'German_Deutsch-Latin1', 'Gonja-UTF8', 'Greek_Ellinika-Greek', 'Greek_Ellinika-UTF8', 'Greenlandic_Inuktikut-Latin1', 'Guarani-Latin1', 'Guen_Mina-UTF8', 'HaitianCreole_Kreyol-Latin1', 'HaitianCreole_Popular-Latin1', 'Hani-Latin1', 'Hausa_Haoussa-Latin1', 'Hawaiian-UTF8', 'Hebrew_Ivrit-Hebrew', 'Hebrew_Ivrit-UTF8', 'Hiligaynon-Latin1', 'Hindi-UTF8', 'Hindi_web-UTF8', 'Hmong_Miao-Sichuan-Guizhou-Yunnan-Latin1', 'Hmong_Miao-SouthernEast-Guizhou-Latin1', 'Hmong_Miao_Northern-East-Guizhou-Latin1', 'Hrvatski_Croatian-Latin2', 'Huasteco-Latin1', 'Huitoto_Murui-Latin1', 'Hungarian_Magyar-Latin1', 'Hungarian_Magyar-Latin2', 'Hungarian_Magyar-UTF8', 'Ibibio_Efik-Latin1', 'Icelandic_Yslenska-Latin1', 'Ido-Latin1', 'Igbo-UTF8', 'Iloko_Ilocano-Latin1', 'Indonesian-Latin1', 'Interlingua-Latin1', 'Inuktikut_Greenlandic-Latin1', 'IrishGaelic_Gaeilge-Latin1', 'Italian-Latin1', 'Italian_Italiano-Latin1', 'Japanese_Nihongo-EUC', 'Japanese_Nihongo-SJIS', 'Japanese_Nihongo-UTF8', 'Javanese-Latin1', 'Jola-Fogny_Diola-UTF8', 'Kabye-UTF8', 'Kannada-UTF8', 'Kaonde-Latin1', 'Kapampangan-Latin1', 'Kasem-UTF8', 'Kazakh-Cyrillic', 'Kazakh-UTF8', 'Kiche_Quiche-Latin1', 'Kicongo-Latin1', 'Kimbundu_Mbundu-Latin1', 'Kinyamwezi_Nyamwezi-Latin1', 'Kinyarwanda-Latin1', 'Kituba-Latin1', 'Korean_Hankuko-UTF8', 'Kpelewo-UTF8', 'Krio-UTF8', 'Kurdish-UTF8', 
'Lamnso_Lam-nso-UTF8', 'Latin_Latina-Latin1', 'Latin_Latina-v2-Latin1', 'Latvian-Latin1', 'Limba-UTF8', 'Lingala-Latin1', 'Lithuanian_Lietuviskai-Baltic', 'Lozi-Latin1', 'Luba-Kasai_Tshiluba-Latin1', 'Luganda_Ganda-Latin1', 'Lunda_Chokwe-lunda-Latin1', 'Luvale-Latin1', 'Luxembourgish_Letzebuergeusch-Latin1', 'Macedonian-UTF8', 'Madurese-Latin1', 'Makonde-Latin1', 'Malagasy-Latin1', 'Malay_BahasaMelayu-Latin1', 'Maltese-UTF8', 'Mam-Latin1', 'Maninka-UTF8', 'Maori-Latin1', 'Mapudungun_Mapuzgun-Latin1', 'Mapudungun_Mapuzgun-UTF8', 'Marshallese-Latin1', 'Matses-Latin1', 'Mayan_Yucateco-Latin1', 'Mazahua_Jnatrjo-UTF8', 'Mazateco-Latin1', 'Mende-UTF8', 'Mikmaq_Micmac-Mikmaq-Latin1', 'Minangkabau-Latin1', 'Miskito_Miskito-Latin1', 'Mixteco-Latin1', 'Mongolian_Khalkha-Cyrillic', 'Mongolian_Khalkha-UTF8', 'Moore_More-UTF8', 'Nahuatl-Latin1', 'Ndebele-Latin1', 'Nepali-UTF8', 'Ngangela_Nyemba-Latin1', 'NigerianPidginEnglish-Latin1', 'Nomatsiguenga-Latin1', 'NorthernSotho_Pedi-Sepedi-Latin1', 'Norwegian-Latin1', 'Norwegian_Norsk-Bokmal-Latin1', 'Norwegian_Norsk-Nynorsk-Latin1', 'Nyanja_Chechewa-Latin1', 'Nyanja_Chinyanja-Latin1', 'Nzema-UTF8', 'OccitanAuvergnat-Latin1', 'OccitanLanguedocien-Latin1', 'Oromiffa_AfaanOromo-Latin1', 'Osetin_Ossetian-UTF8', 'Oshiwambo_Ndonga-Latin1', 'Otomi_Nahnu-Latin1', 'Paez-Latin1', 'Palauan-Latin1', 'Peuhl-UTF8', 'Picard-Latin1', 'Pipil-Latin1', 'Polish-Latin2', 'Polish_Polski-Latin2', 'Ponapean-Latin1', 'Portuguese_Portugues-Latin1', 'Pulaar-UTF8', 'Punjabi_Panjabi-UTF8', 'Purhepecha-UTF8', 'Qechi_Kekchi-Latin1', 'Quechua-Latin1', 'Quichua-Latin1', 'Rarotongan_MaoriCookIslands-Latin1', 'Rhaeto-Romance_Rumantsch-Latin1', 'Romani-Latin1', 'Romani-UTF8', 'Romanian-Latin2', 'Romanian_Romana-Latin2', 'Rukonzo_Konjo-Latin1', 'Rundi_Kirundi-Latin1', 'Runyankore-rukiga_Nkore-kiga-Latin1', 'Russian-Cyrillic', 'Russian-UTF8', 'Russian_Russky-Cyrillic', 'Russian_Russky-UTF8', 'Sami_Lappish-UTF8', 'Sammarinese-Latin1', 'Samoan-Latin1', 'Sango_Sangho-Latin1', 'Sanskrit-UTF8', 'Saraiki-UTF8', 'Sardinian-Latin1', 'ScottishGaelic_GaidhligAlbanach-Latin1', 'Seereer-UTF8', 'Serbian_Srpski-Cyrillic', 'Serbian_Srpski-Latin2', 'Serbian_Srpski-UTF8', 'Sharanahua-Latin1', 'Shipibo-Conibo-Latin1', 'Shona-Latin1', 'Sinhala-UTF8', 'Siswati-Latin1', 'Slovak-Latin2', 'Slovak_Slovencina-Latin2', 'Slovenian_Slovenscina-Latin2', 'SolomonsPidgin_Pijin-Latin1', 'Somali-Latin1', 'Soninke_Soninkanxaane-UTF8', 'Sorbian-Latin2', 'SouthernSotho_Sotho-Sesotho-Sutu-Sesutu-Latin1', 'Spanish-Latin1', 'Spanish_Espanol-Latin1', 'Sukuma-Latin1', 'Sundanese-Latin1', 'Sussu_Soussou-Sosso-Soso-Susu-UTF8', 'Swaheli-Latin1', 'Swahili_Kiswahili-Latin1', 'Swedish_Svenska-Latin1', 'Tahitian-UTF8', 'Tenek_Huasteco-Latin1', 'Tetum-Latin1', 'Themne_Temne-UTF8', 'Tiv-Latin1', 'Toba-UTF8', 'Tojol-abal-Latin1', 'TokPisin-Latin1', 'Tonga-Latin1', 'Tongan_Tonga-Latin1', 'Totonaco-Latin1', 'Trukese_Chuuk-Latin1', 'Turkish_Turkce-Turkish', 'Turkish_Turkce-UTF8', 'Tzeltal-Latin1', 'Tzotzil-Latin1', 'Uighur_Uyghur-Latin1', 'Uighur_Uyghur-UTF8', 'Ukrainian-Cyrillic', 'Ukrainian-UTF8', 'Umbundu-Latin1', 'Urarina-Latin1', 'Uzbek-Latin1', 'Vietnamese-ALRN-UTF8', 'Vietnamese-UTF8', 'Vlach-Latin1', 'Walloon_Wallon-Latin1', 'Wama-UTF8', 'Waray-Latin1', 'Wayuu-Latin1', 'Welsh_Cymraeg-Latin1', 'WesternSotho_Tswana-Setswana-Latin1', 'Wolof-Latin1', 'Xhosa-Latin1', 'Yagua-Latin1', 'Yao-Latin1', 'Yapese-Latin1', 'Yoruba-UTF8', 'Zapoteco-Latin1', 'Zapoteco-SanLucasQuiavini-Latin1', 'Zhuang-Latin1', 'Zulu-Latin1'], label="Choose one the below 
languages")
-
-def SimultaneousSpellingPrac(text):
- TextToks = text.split()
- FinalOutput = "For Sentences wrap in another function that calls function per sentences (Spacy) \n"
-
- iLongestWord = 0
- for tok in TextToks:
- if len(tok) > iLongestWord: iLongestWord = len(tok)
-
- Equaltok = ""
- for tok in TextToks:
- Equaltok = Equaltok + tok.ljust(iLongestWord, '0') + " " #https://stackoverflow.com/questions/23216512/python-make-string-equal-length
-
- SimulList = []
- for i in range(0, iLongestWord):
- for tok in Equaltok.split():
- SimulList.append(tok[i])
-
- iWordSpaces = 0
- ZerosFinalOutput = ""
-
- for item in SimulList:
- iWordSpaces += 1
- ZerosFinalOutput = ZerosFinalOutput + item
- if iWordSpaces == len(TextToks):
- ZerosFinalOutput = ZerosFinalOutput + " "
- iWordSpaces = 0
-
- FinalOutput = FinalOutput + ZerosFinalOutput + " \n\n" + ZerosFinalOutput.replace("0", "") + " \n\n" + str(iLongestWord)
- return FinalOutput
-
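-# A minimal usage sketch (illustrative input, not wired into the app):
-# SimultaneousSpellingPrac("cat dog") pads both words to the longest length,
-# then emits the letters column by column -> "cd ao tg", so several words are
-# spelled simultaneously, one letter from each word per step.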
-def FirstLetterSummary(Text):
- TextToks = Text.split(" ")
- FinalOutput = ''
- for tok in TextToks:
- FinalOutput = FinalOutput + tok[0] + " "
-
- WordSuggestLetters = FinalOutput.replace(" ","")
- WordSuggestToks = [(WordSuggestLetters[i:i+5]) for i in range(0, len(WordSuggestLetters), 5)]
- WordsSuggest = ""
-
- for text in WordSuggestToks:
- WordsSuggest = WordsSuggest + " " + text
-
- return FinalOutput, WordsSuggest
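-# Example (hedged): FirstLetterSummary("the quick brown fox") returns
-# ("t q b f ", " tqbf") - the first letters as a recall skeleton, then
-# regrouped into five-letter pseudo-words to suggest memorable chunks.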
-
-#-------
-
-def imagebasedreading(inputtext):
- # Read the user input text file
- #with open("inputtext.txt", "r", encoding="utf-8") as file:
- # inputtext = file.read()
-
- inputtextlines = inputtext.splitlines()
-
- htmlpart1 = """
-
-
-
- Image Placeholder with Text Background
-
-
-
-
-
-
-
-
-
- """
-
- htmlpart2 = """
-
-
-
-
-
-
- """
-
-    # If you have a GPU and an image pipeline, then src in the img tag = filepath of the generated image
- def generate_html(textlines):
- num_containers = len(textlines)
- html_string = ""
- for i in range(num_containers):
- container = f'''
-
-
-
-
- '''
- html_string += container
- return html_string
-
- # Generate the HTML based on the number of lines
- output_html = htmlpart1 + "\n" + generate_html(inputtextlines) + "\n" + htmlpart2
-
- # Save the generated HTML
- now = datetime.datetime.now()
- filename = f"ImagePlaceholder{now.strftime('%Y-%m-%d_%H-%M')}_{inputtext[:16]}.html"
- with open(filename, "w", encoding="utf-8") as file:
- file.write(output_html)
-
- return filename, output_html, output_html
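-# Sketch of the output: each input line becomes one placeholder container in
-# the saved HTML, so a text can be illustrated line by line; with a GPU and an
-# image pipeline the img src would point at generated images (see note above).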
-
-#--------
-
-guidedreadingseperator = gr.Dropdown(choices=[".", ",", "lines", "Sentences"], value=".", interactive=True)
-textspreprocess = [
- ["Bing created this example \nApple is looking at buying U.K. startup for $1 billion \nApple schaut sich für 1 Milliarde Dollar ein Startup aus Großbritannien an \nApple está mirando comprar una startup del Reino Unido por mil millones de dólares \n애플은 영국의 스타트업을 10억 달러에 사려고 한다 \nアップルは、英国のスタートアップを10億ドルで買収する予定だ \n苹果正考虑以10亿美元收购英国初创公司", "\n"],
- ["This is an english sentence. This is the second english sentence", "."],
- ]
-
-LLPromptIdeas = """
-
-Let's try this: I will give you the English word and you find the <> word and then mix the meaning of the word with the mnemonic
-
-Bing's Explanation
-You're welcome. I'm happy to help. If I wanted to explain the rules of this game clearly and concisely, I would do something like this:
-
-The game is about finding the <> word for an English word and making a mnemonic that mixes the meaning of the word with the mnemonic.
-The game has two players: one who gives the English word and one who finds the <> word and makes the mnemonic.
-The player who gives the English word can choose any word they want, as long as it has a <> equivalent.
-The player who finds the <> word and makes the mnemonic can use any online or offline resources they want, such as dictionaries, translators, or search engines.
-The player who finds the <> word and makes the mnemonic should write their answer in the format: The <> word for X is Y <<(pinyin)>>, which means Z. Here are some possible mnemonics that mix the meaning of the word with the mnemonic: A, B, C, D.
-
-
-
-Make jokes while following rules for a syllogism jokes game:
-
-The game can be played with any number of people.
-One person starts by stating a syllogism, which is a logical argument that consists of three parts: a major premise, a minor premise, and a conclusion.
-The next person must then state a syllogism that has the same conclusion as the first syllogism, but with different major and minor premises.
-The game continues in this way until someone cannot think of a new syllogism.
-The person who makes the last valid syllogism wins the game.
-
-
-
-Do you know pydot?
-Please create code for a class diagram using the pydot library in python for the following topic/entity
-
-
-
-(System/First request) Your job is to lengthen Text sent to you in a meaningful way. You must create 20 paragraphs for each Text line sent by the user
-(User) Text: I went to the beach
-
-
-
-
-replace as many words with emojis in the sentence Life is very sweet
-next sentence is AI Town is a virtual town where AI characters live, chat and socialize.
-
-
-
-
-
-"""
-
-# The original delimiter was stripped along with the page markup; splitting on
-# runs of blank lines is an assumption that recovers the five prompt blocks.
-LLPromptIdeasasbtns = [p for p in LLPromptIdeas.split("\n\n\n") if p.strip()]
-
-def loadforcopybuttonllmpromptideas():
-    ideas = LLPromptIdeasasbtns
-    return "Load the examples with the button below. Alternatively, copy-paste manually from above", ideas[0], ideas[1], ideas[2], ideas[3], ideas[4]
-
-def display_website(link):
-    # The embed markup was stripped in this copy; an iframe is the assumed intent.
-    html = f"<iframe src='{link}' width='100%' height='1000px'></iframe>"
-    gr.Info("If you see a 404, the space/page has probably been disabled - normally due to a better alternative")
-    return html
-
-def RepititionPracticeTimeCalculator(text, reps_per_item, seconds_per_item):
- textlines = text.splitlines()
- lines = len(textlines)
- FinalOutput = f"Total Time is estimated: { lines * reps_per_item * seconds_per_item / 60 } minutes ( {lines} lines)"
- return FinalOutput
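-# Worked example: 10 lines x 3 reps x 5 seconds = 150 s / 60 = 2.5 minutes, so
-# RepititionPracticeTimeCalculator("line\n" * 10, 3, 5) reports
-# "Total Time is estimated: 2.5 minutes ( 10 lines)".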
-
-
-randomExposuremessageText = ["Great Test for LLM function calling (with Gradio Client)", "Unknown Tracker Tab = Incomplete Reading Assistant Idea - HTML app based on text to be read", "Bing mnemonic - lost = don't ignore unusual sounds here inside lost cave", "1000 verbs in lists of 100, verbs = easy sentence structure estimation (SVO, SOV, etc.)", "Can put any message here in the navigation tab"]
-
-def randommarquee():
-    randomExposuremessagelistitem = random.choice(randomExposuremessageText)
-    # The scrolling markup was stripped in this copy; a <marquee> tag is the assumed original intent.
-    return f"<marquee behavior='scroll' direction='left'>{randomExposuremessagelistitem}</marquee>"
-
-def TabNavigation():
- return gr.Tabs.update(selected=1) #, tabs1=nav1)
-
-def segment_video_with_opencv(file_path, segment_duration=60):
- # Open the video file
- cap = cv2.VideoCapture(file_path.name)
-
- # Get video properties
- fps = int(cap.get(cv2.CAP_PROP_FPS))
- total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-
- # Calculate total segments required
- total_segments = math.ceil(total_frames / (fps * segment_duration))
-
- # List to store the file paths of the generated chunks
- generated_files = []
-
- for segment in range(total_segments):
-        # Define the codec and create a VideoWriter object.
-        # 'mp4v' is the MPEG-4 Part 2 codec (not H.264, which would be 'avc1').
-        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-        output_filename = f'chunk_{segment}.mp4'
-        out = cv2.VideoWriter(output_filename, fourcc, fps,
-                              (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
-                               int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))))
-
- for frame_num in range(fps * segment_duration):
- ret, frame = cap.read()
- if ret:
- out.write(frame)
- else:
- break
- out.release()
-
- # Append the file path of the generated chunk to the list
- generated_files.append(output_filename)
-
- cap.release()
-
- return generated_files
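-# Usage sketch (hypothetical file): a 150 s video at 30 fps has 4500 frames,
-# so ceil(4500 / (30 * 60)) = 3 chunks - two full 60 s files and a 30 s tail.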
-
-def fill_lines(input, num_lines=1000):
- # Split the input by newline and store it in a list
- input_list = input.splitlines()
- # Calculate how many lines each part of the input should get
- lines_per_part = int(num_lines // len(input_list))
- # Initialize an empty list to store the output
- output_list = []
- currentpart = ""
- # Loop through each part of the input
- for part in input_list:
-
- currentpart += part + "\n"
- # Fill the list of strings into one string with newlines
- filled_part = currentpart * lines_per_part #textwrap.fill(wrapped_part, width=lines_per_part)
- # Append the filled part to the output list
- output_list.append(filled_part)
- currentpart = ""
- # Join the output list into one string with newlines
- output = "\n".join(output_list)
-
- return output
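-# Example (hedged): fill_lines("a\nb", num_lines=4) gives each part 4 // 2 = 2
-# repeats, producing "a\na\n\nb\nb\n" - each line is duplicated until roughly
-# num_lines total lines exist (an empty input would raise ZeroDivisionError).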
-
-def TestSplitandUpdatebtntest():
- gr.Info("Incomplete - Text Chosen for Interface")
- pass
-
-def TestSplitandUpdate(Text):
-
- return f" Length of the text - { len(Text) }", gr.Button("Incomplete - Set this Text as default for all interfaces") #.click(TestSplitandUpdatebtntest, inputs=None, outputs=None) - Returns the event instead of the button with the event
-
-TestSplitandUpdateinput = gr.Textbox(placeholder="Counter and placeholder - one point of entry for the text to be analysed across the whole app")
-
-def RepititionInjectedReading(learning, reading):
- readingdoc = nlp(reading)
- learninglist = learning.splitlines()
- FinalOutput = ""
- numofsentencesinreading = sum(1 for _ in readingdoc.sents) #len(readingdoc.sents) is wrong because of generator
- numofsentencesinlearning = len(learninglist)
- RepInjectedText = "\n"
-
- for i in range(0, numofsentencesinlearning):
- for sent in readingdoc.sents:
- RepInjectedText += sent.text + " (" + learninglist[i] + ") "
-
- FinalOutput = f"{ numofsentencesinreading } repitition oppurtunities between the sentences: \n { RepInjectedText }"
-
- return FinalOutput
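-# Sketch (assumes a loaded spaCy pipeline `nlp`): with reading "A. B." and
-# learning lines "x" and "y", the output interleaves
-# "A. (x) B. (x) A. (y) B. (y)" - repeated exposure inside real context.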
-
-# For testing purposes
-# file_paths = segment_video_with_opencv("path_to_your_video.mp4")
-# print(file_paths)
-
-
-# Define the Gradio interface inputs and outputs for video split
-spvvideo_file_input = gr.File(label='Video File')
-spvsubtitle_file_input = gr.File(label='Subtitle File')
-spvdownload_output = gr.File(label='Download Segmented Files')
-
-Markovlength = gr.Number(value=30, label='Length of generation')
-
-groupinput_text = gr.Textbox(lines=2, label="Enter a list of words")
-groupoutput_text = gr.Textbox(label="Grouped words")
-
-Translationchuncksize = gr.Number(value=4998)
-RepSched_Num_lines = gr.Number(value=1000, label="number of lines")
-
-randomExposuremessage = randommarquee()
-randomExposuremessage2 = randommarquee()
-
-VideoTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
-VideoTestSubtitleInput = gr.File(label="select a subtitle file", file_types=[".txt", ".srt", ".vtt"])
-VideoSplitTestInput = gr.File(label="select a mp4 video file", file_types=[".mp4"])
-
-with gr.Blocks() as lliface: #theme=gr.themes.Glass(primary_hue='green', secondary_hue='red', neutral_hue='blue', )
-    gr.HTML('---- Under Construction: Very slowly figuring out what an AI-integrated interface means (Chat vs Forms vs Function calling vs Sensor + Trigger vs Agent) | How to end copy-paste once and for all? ---- All the APIs from the below space need to be treated like RAG, as notes for the LLM to read before providing its answer')
- with gr.Accordion("LLM HF Spaces/Sites (Click Here to Open) - Use 'Acronym Map Creation Space' Tab with this - Ask for Translation of image tags made below, sentence to emojis, Wordlists, Test Conversations, Get Grammar Explanations etc., Can use GPT-4 or new SOTA to review the conversation", open=False):
- with gr.Row():
- linktochat = gr.Dropdown(choices=["https://sdk.vercel.ai/docs", "https://labs.perplexity.ai/", "https://chat.lmsys.org", "https://huggingfaceh4-zephyr-chat.hf.space", "https://osanseviero-mistral-super-fast.hf.space", "https://artificialguybr-qwen-14b-chat-demo.hf.space", "https://huggingface-projects-llama-2-7b-chat.hf.space", "https://ysharma-explore-llamav2-with-tgi.hf.space", "https://mosaicml-mpt-30b-chat.hf.space", "https://huggingfaceh4-falcon-chat.hf.space", "https://uwnlp-guanaco-playground-tgi.hf.space", "https://stabilityai-stablelm-tuned-alpha-chat.hf.space", "https://mosaicml-mpt-7b-storywriter.hf.space", "https://huggingfaceh4-starchat-playground.hf.space", "https://bigcode-bigcode-playground.hf.space", "https://mosaicml-mpt-7b-chat.hf.space", "https://huggingchat-chat-ui.hf.space", "https://togethercomputer-openchatkit.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
- chatspacebtn = gr.Button("Use the chosen URL to load interface with a chat model. For sdk.vercel click the chat button on the top left")
- with gr.Accordion("Some prompt ideas", open=False):
- with gr.Accordion("Prompts in text (Manual copy paste)", open=False):
- gr.HTML(LLPromptIdeas)
- gr.Interface(loadforcopybuttonllmpromptideas, inputs=None, outputs=["html", "code", "code", "code", "code", "code"])
- chatspace = gr.HTML("Chat Space Chosen will load here")
- chatspacebtn.click(display_website, inputs=linktochat, outputs=chatspace)
- with gr.Accordion("Image HF Spaces/Sites (Click Here to Open) - Use with the image placeholder in Workflows tab", open=False):
- with gr.Row():
- linktoimagegen = gr.Dropdown(choices=["https://simianluo-latent-consistency-model.hf.space", "https://google-sdxl.hf.space", "https://fffiloni-sdxl-control-loras.hf.space"], label="Choose/Cancel type any .hf.space link here (can also type a link)'", allow_custom_value=True)
-            imagegenspacebtn = gr.Button("Use the chosen URL to load an interface with an image model")
- imagegenspace = gr.HTML("Chat Space Chosen will load here")
- imagegenspacebtn.click(display_website, inputs=linktoimagegen, outputs=imagegenspace)
- gr.HTML("
")
- with gr.Row():
- with gr.Column(scale=1):
- with gr.Tabs() as nav1:
- with gr.Tab("Rep - HTML"):
- gr.HTML("UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know")
- gr.HTML("""""")
- with gr.Tab("Rep - Gradio"):
- gr.Interface(fn=group_words, inputs=groupinput_text, outputs=groupoutput_text, description="Word Grouping and Rotation - Group a list of words into sets of 10 and rotate them every 60 seconds.") #.queue()
- with gr.Tab("Navigation"):
- gr.HTML("Primary goal of this space is to help with memorisation --> Two main forms read or listen (rewriting is also an option for mission critical information - acronym map (too time comsuming))")
- gr.HTML("Picture Annotation Chorus Focused Word List Merged Subtitles Repetitive Audio (TTS) Word and Sentence Jumbling Unkown: Wordnet Unknown: Wikipeadia ")
- PracticeExposureInput = gr.Textbox("", placeholder="Exposure practice = look up", label="Exposure at the top")
- PracticeExposurebtn = gr.Button("Change Default") #Button CLick is defined under the variable it needs to manipulate to avoid undefined error
- gr.Button("Tab Navigation").click(TabNavigation, inputs=None, outputs=[nav1])
- with gr.Tab("Words Lists"):
- gr.HTML("Stop, Sight(Dolch) and other Wordlists")
- gr.HTML("Wikipeadia Basic: -- Dolch (Sight) Words -- | Advanced: -- Blend Word -- | -- List_of_portmanteaus -- | ")
- gr.HTML("Reddit -- Wordplay -- | ")
- gr.HTML("Language Tests ")
- gr.HTML("Other -- English (StackExchange) -- | -- Overlapping Blends (StackExchange) -- | ")
- with gr.Tab("Vector Database = Memorisation"):
- gr.HTML("Phrasebook on demand in realtime
Open AI - 10000 * 1000tokens (+- 4000 characters) = 1$ (0.0001 per 1000 tokens / 750 words), Cohere Multilingual = free for personal use / Commercial use = \n Vector Database query = Better than text search but not for logical relationships")
- with gr.Tab("Time Estimate Calculator"):
- gr.HTML("Repitition = A subconcious time gaame - transparent screens + below repitition assist (Vision) or (Audio) ")
- gr.Interface(fn=RepititionPracticeTimeCalculator, inputs=["text", "number", "number"], outputs="text")
- with gr.Column(scale=3):
- with gr.Tab("Workflows"):
- with gr.Row():
- gr.HTML("Start at Unkown Tracker if unseure UNNWFWO = Unknown Native Word Foreign Word Order i.e. during active listening practice you only need the words you dont know
General Ideas in this space - Speed of Learning = Avoid Things you know like the plague -- How to track what you know -- Counter is easiest and How you feel is the hardest (The more you know, the more confusion on what you dont know as you probably werent keeping track)
Visulisation of long text - Bottom of this page Wordlist - 1 new word at a time per minute in the space to the left Youtube Video Watching - Subtitles Tab Reading - Unknown Tracker Tabs Longer Text Memorising - Acronym Map Creation Tab and Transition Tab Brainstorming - Reading Assistant Random Exposure ")
- gr.Interface(fn=TestSplitandUpdate, inputs=TestSplitandUpdateinput, outputs=["text", "button"])
- with gr.Row():
- PracticeExposure = gr.HTML(randomExposuremessage)
- PracticeExposure2 = gr.HTML(randomExposuremessage2)
- PracticeExposurebtn.click(fn=changeexposuretext, inputs=PracticeExposureInput, outputs=PracticeExposure)
- with gr.Row():
- with gr.Column(scale=1):
- gr.HTML("Advanced Repitition = Combinatorics --> to understand a sentence properly you need understanding of every word --> in language that means use with other words --> Combos within the unique words in a sentence, paragraph, page, etc. --> as close to 3 word sentences")
- with gr.Column(scale=1):
- gr.HTML("
Timing Practice - Repitition: Run from it, Dread it, Repitition is inevitable - Thanos --> Repitition of reaction - Foreign in eyes/ears native in mind (For beginners) | Repitition is a multitask activity like driving must be subconcious process to show mastery
This is where TTS helps as you are ignoring all words except the words just before the actual Tiny Stories dataset is like a graded reader ")
- gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions, "checkbox"], outputs="audio", description="Paste chorus lyrics from below here and use TTS or make notes to save here (Or paste anything)")
- with gr.Accordion("TTS Spaces", open=False):
- TTSspaceoptions = gr.Dropdown(choices=["https://suno-bark.hf.space", "https://coqui-xtts.hf.space"], label="existing whisper spaces")
-                            TTSspaceoptionsbtn = gr.Button("Load a TTS Space")
- TTSspaceoptionsOut = gr.HTML()
- TTSspaceoptionsbtn.click(fn=display_website, inputs=TTSspaceoptions, outputs=TTSspaceoptionsOut)
- gr.HTML("
Fastest way to learn words = is to have your own sound reference --> probably why babies learn fast as they make random noise
If you know the flow of the song you can remember the spelling easier
Essentially if the sounds are repeated or long notes they are easy to remember
")
- gr.Interface(fn=AutoChorusInvestigator, inputs="text", outputs="text", description="Paste Full Lyrics to try find only chorus lines")
-                        gr.Interface(fn=AutoChorusPerWordScheduler, inputs="text", outputs="text", description="Create an order of repetition for TTS practice")
- with gr.Column(scale=1):
- gr.HTML("""Reading - Caption images (SD/Dalle-E) -- Unsplash - free images -- | --Huggingface CLIP-Interrogator Space-- | -- Clip interrogator 2 -- | -- Tag2Text is faster than clip -- | -- Transform word to an image -- | -- Promptist (Microsoft) -- | -- RAM and Tag2Text -- | -- SAM with Clip -- """)
- with gr.Accordion("RAM/Tag2Text Space - Create Tags here and Copy paste", open=False):
- RAMSpaceLink = gr.Textbox("https://xinyu1205-recognize-anything.hf.space")
- RAMSpacetest = gr.HTML("")
- RAMSpacetestbtn = gr.Button('Load Space')
- RAMSpacetestbtn.click(display_website, RAMSpaceLink, RAMSpacetest)
- with gr.Accordion("SAM Space Test", open=False):
- SAMSpaceLink = gr.Textbox("https://curt-park-segment-anything-with-clip.hf.space")
- SAMSpacetest = gr.HTML("")
- SAMSpacetestbtn = gr.Button('Load Space')
- SAMSpacetestbtn.click(display_website, SAMSpaceLink, SAMSpacetest)
- gr.HTML("Use Shift Enter To put text on new lines if the text doesnt fit if theres an error you have to remove the foreign letters and place roman ones")
- gr.Interface(fn=add_text_to_image , inputs=["image", "text"], outputs="image", description="Create Annotated images (Can create using stable diffusion and use the prompt) - Describe from one side to the other to make guessing easy")
- #with gr.Tab("Transcribe - RASMUS Whisper"):
- #gr.Interface.load("spaces/RASMUS/Whisper-youtube-crosslingual-subtitles", title="Subtitles")
- with gr.Tab("Beginner - Reading Assitant + Unknown Tracker"):
- gr.HTML(" -- Microsoft Immersive Reader (Comprehension) -- | LingQ - (Word Familiarity based) ")
- gr.HTML("Repitition of things you know is a waste of time when theres stuff you dont know
In Language the goal is bigger vocab --> Knowledge equivalent = question answer pairs but to get to those you need related information pairs
Vocab = Glossary + all non text wall(lists, diagrams, etc.)
")
- gr.Textbox("Placeholder for a function that creates a set list and can takes a list for known words and auto find replaces the stuff you know out of the content")
- gr.Interface(fn=GuidedReading, inputs=["text", guidedreadingseperator], outputs="text", description="Manual POS Tag and Transliteration", examples=textspreprocess)
- gr.HTML("Place holder for a translate to english interface so that highlighting can still work as only english supported for now - -- Google Translate -- ")
- with gr.Tab("Unique word ID - use in Infranodus"):
- with gr.Accordion(label="Infranodus", open=False):
- gr.HTML(" -- Infranodus - Word Level Knowledge graphs -- | Use the below interfaces to find the items that dont have entries --> These will represent new concepts or people which need to be understood individually to fully understand the text --> Infranodus search will help find related and unrelated investigation paths
TODO Figure Output Zoom / Image Dimensions")
- gr.Image(source="upload", label="Open Infranodus Screenshot")
- gr.Image(source="upload", label="Open Infranodus Screenshot")
- gr.Interface(fn=unique_word_count, inputs="text", outputs="text", description="Wordcounter")
- gr.HTML("Use the below interface to fill in the space in this format and then use the chat iframe at the top to ask llm to analyse this:
Consider how the following sentence meaning will change if the each if the selected word is replaced with one hypernym at a time: Sentence: Hypernyms: ")
- gr.Interface(fn=SepHypandSynExpansion, inputs="text", outputs=["text", "text"], description="Word suggestions - Analyse the unique words in infranodus")
- gr.Interface(fn=WikiSearch, inputs="text", outputs="text", description="One word at a time Unique word suggestions (wiki articles)")
- with gr.Tab("Automating related information linking"):
- gr.HTML("Questions - Taking and suggesting questions to ask = new education --> Esp. Infranodus type outer discourse identification as question generation")
- gr.HTML("The point of reading is to refine future actions especially problem solving --> Creating problem scenarios = thinking ahead of time = One form of effective reading")
- with gr.Tab("Beginner - Vague Language and Guessing POS"):
- with gr.Row():
- gr.HTML("Some Vague Words - Quantifiers, Pronouns, etc.
Very, Many, Few, Lots, Lets add 40 words to this list Find Replace all nouns with something/someone or and for verbs figure out how to generalise them")
- gr.HTML("Parts of speech recognition = comprehension Three word sentences will give a easier guessing chance")
- gr.HTML('')
- with gr.Tab("Advanced - Making Questions = Reading"):
- gr.HTML("Some Example Prompts
Please make 10 questions baseed on this text: ")
- with gr.Row():
- gr.TextArea("Paste the text to read here", interactive=True)
- gr.TextArea("Make as many questions on the text as you can in native language and then translate", interactive=True)
- gr.Dropdown(["Placeholder chunk 1", "Placeholder chunk 2", "Placeholder chunk 3"])
- gr.HTML("Load the current chunk here and Put a Dataframe where you have only one column for the questions")
- with gr.Tab('Acronym Map Creation Space'):
- gr.HTML("Acronym cant be read with previous attentive reading - accurate measure of known vs unknown")
- with gr.Row():
- with gr.Accordion('Acronym Map/Skeleton Creator'):
- gr.HTML("Moved to Progress for now")
- with gr.Accordion('Test with LLM'):
-                    gr.Label('Letters are always easier to recall than whole words. GPT-4 and above are best suited for this prompt, but you can test anywhere')
-                    gr.HTML('Please help me study by making an acronym map for the maths ontology (ask if there are questions)')
-                    gr.TextArea('', label='Paste LLM response')
-                    gr.HTML('Good, but we now need to create 9 acronym-based words - 1 for the headings together and then one each for the subheadings')
- gr.TextArea('', label='Paste LLM response')
- with gr.Accordion(''):
-                    gr.HTML('If study content were a map, the first letters = the shape of the whole text = roads')
-                    gr.HTML('Known = ability to match an item to a retrieval cue instantly - the retrieval cue for the whole text = Acronym Map')
- with gr.Tab("Advanced - Youtube - Subtitles - LingQ Addon Ideas"):
- gr.HTML("Find LingQ Here --> https://www.lingq.com/en/")
- with gr.Tab("Visual - Multiline Custom Video Subtitles"):
- gr.HTML("LingQ Companion Idea - i.e. Full Translation Read along, and eventually Videoplayer watch along like RAMUS whisper space
Extra functions needed - Persitent Sentence translation, UNWFWO, POS tagging and Word Count per user of words in their account. Macaronic Text is also another way to practice only the important information")
- gr.HTML("""
For Transcripts to any video on youtube use the link below ⬇️
If Space not loaded its because of offline devopment errors please message for edit
")
- with gr.Tab("Merged Subtitles"):
- gr.HTML(""" Core Idea = Ability to follow one video from start to finish is more important than number of words (except for verbs)
- Step 1 - Get foreign transcript - WHISPER (Need to download video though - booo) / Youtube / Youtube transcript api / SRT websites
- Step 2 - Get Translation of foreign transcript
- Step 3 - Word for Word Translation Creation in both Directions (Paste Google Translation here)
- """)
- gr.Interface(fn=split_srt_file, inputs=["text", SRTLangOptions] , outputs=["text", "file", "text", "text"], description="SRT Contents to W4W Split SRT for Google Translate")
-                    gr.Interface(fn=chunk_srt_text, inputs=['text', Translationchuncksize], outputs=['dataframe','text'], description='Assistant for the Google Translate character limit - aka where to expect cuts in the text')
-                    gr.HTML("Step 4 - Pronunciation (Roman) to Subtitle Format --> GTranslate returns an unformatted string")
- gr.Interface(fn=splittext, inputs="text", outputs="text", description="Text for w4w creation in G Translate")
- gr.HTML("Step 5 - Merge into one file")
- with gr.Row():
- RomanFile = gr.File(label="Paste Roman")
- W4WFile = gr.File(label="Paste Word 4 Word")
- FullMeanFile = gr.File(label="Paste Full Meaning")
- MacaronicFile = gr.File(label="Paste Macaronic Text")
- SentGramFormula = gr.File(label="Paste Sentence Grammar Formula Text")
- with gr.Row():
-                        MergeButton = gr.Button(label='Merge the separate files into one interpolated file (line-by-line merge)')
- with gr.Row():
- MergeOutput = gr.TextArea(label="Output")
- MergeButton.click(merge_lines, inputs=[RomanFile, W4WFile, FullMeanFile, MacaronicFile], outputs=[MergeOutput], )
- with gr.Row():
- gr.Text("Make sure there are 4 spaces after the last subtitle block (Otherwise its skipped)")
- CleanedMergeButton = gr.Button(label='Create a Usable file for SRT')
- with gr.Row():
- CleanedMergeOutput = gr.TextArea(label="Output")
- CleanedMergeButton.click(fn=SRTLineSort, inputs=[MergeOutput], outputs=[CleanedMergeOutput])
- with gr.Tab("Split video to segments"):
- gr.HTML("How to make screenshot in vlc - https://www.vlchelp.com/automated-screenshots-interval/ ")
- gr.Interface(VideotoSegment, inputs=[spvvideo_file_input, spvsubtitle_file_input], outputs=spvdownload_output)
- gr.TextArea("Placeholder for ffmpeg command generator and ffmpeg-python code to split video")
- gr.Text("Text to Closed Class + Adjectives + Punctuation or Noun Verb + Punctuation ")
- with gr.Tab("Audio - Only English thoughts as practice"):
- gr.HTML("For Audio Most productive is real time recall of native (where your full reasoning ability will always be) Find Replace new lines of the foreign text with full stops or | to get per word translation")
-                gr.Interface(fn=TTSforListeningPractice, inputs=["text", TTSLangOptions2], outputs="audio", description="Paste only english words in foreign order and then keep removing the words from this to practise more effectively")
- with gr.Tab("Transition is the end goal (SOV, SVO, VSO)"):
- gr.Interface(fn=FrontRevSentChunk, inputs=[ChunkModeDrop, "checkbox", "text", langdest], outputs="text", description="Chunks creator")
- with gr.Row():
- with gr.Column():
-                        gr.Interface(fn=AutoSyllablePractice, inputs="text", outputs="text", description="One word at a time | Audio spelling practice using vowels as the separator")
-                        gr.Textbox("A word is a list of letters as a fact is a list of words. Both are in a specific order. What is most important is to practice the order, so a randomiser is the tool", lines=4)
-                        gr.Interface(fn=RandomiseTextbyType, inputs=["text", RandomiseTextType], outputs="text", description="Randomise order within words, sentences, paragraphs")
- with gr.Column():
- #with gr.Tab("Collocations (Markov)"):
- gr.HTML("Transition is the true nature of logic i.e. like some form of non-semantic embedding that is semantic?")
- gr.Interface(fn=build_model, inputs="text", outputs=["text", "text"], description="Create Collocation Dictionary --> Google Kathryn Lingel - Pyambic Pentameter Example - PyCon US for more")
- gr.Interface(fn=markov_generate, inputs=["text", Markovlength], outputs="text", description="Generate Text based on the collocations in the text")
- with gr.Column():
- #with gr.Tab("Spelling + Chunks"):
- gr.Textbox("Merged Spelling Practice Placeholder - Spell multiple words simultaneously for simultaneous access", lines=3)
- gr.HTML("
Spell multiple words simultaneously for simultaneous access
Spelling Simplification - Use a dual language list? | Spelling is the end goal, you already know many letter orders called words so you need leverage them to remember random sequences")
- gr.Interface(fn=create_dictionary, inputs="text", outputs="text", title="Sort Text by first two letters")
- gr.Interface(fn=keep_nouns_verbs, inputs=["text"], outputs="text", description="Noun and Verbs only (Plus punctuation)")
- with gr.Tab("Thinking Practice (POS)"):
- gr.HTML("By removing all nouns and verbs you get a format to practice thinking about your words to use to make sentences which make sense within constraints")
- with gr.Row():
- with gr.Column():
- with gr.Tab("Sentence to Practice Format"):
- gr.Interface(fn=split_verbs_nouns , inputs="text", outputs=["text", "text", "text"], description="Comprehension reading and Sentence Format Creator")
- with gr.Column():
- gr.HTML(" -- SQL Dataset - A list of simple questions -- |")
- gr.Textbox(label='Use this text to hold translations of the SQL rows in the above linked dataset (A kind of What I say vs what I want)')
- with gr.Tab("Knowledge Ideas - Notetaking"):
- gr.HTML("""
Good knowledge = ability to answer questions --> find questions you can't answer and look for the hidden answers within them
-
My One Word Theory = We only use more words than needed when we have to or are bored --> Headings exist because title is not sufficient, subheadings exist because headings are not sufficient, Book Text exists because subheadings are not sufficient
-
Big Picture = Expand the Heading and the subheadings and compare them to each other
-
Application of Knowledge = App Version of the text (eg. Jupyter Notebooks) is what you create and learn first
- """)
- gr.Label('Placeholder for LLM api plus the drop down function below populate text for each line into dropdowns')
- gr.Interface(fn=TextCompFormat, inputs=["textarea", HTMLCompMode], outputs="text", description="Convert Text to HTML Dropdown or Links which you paste in any html file")
- gr.Interface(fn=create_collapsiblebutton, inputs=["textbox", "textbox", "textarea"], outputs="textbox", description="Button and Div HTML Generator, Generate the HTML for a button and the corresponding div element.")
- with gr.Tab("Real-Time AI - Video/Audio/AR"):
- gr.HTML("HUD Experiment (Waiting for GPT4V API)- Full context of user situation + Ability to communicate in real-time to user using images (H100+ and low enough resolution and low enough steps - it/s = fps) - just like google maps but for real life")
- gr.Interface(fn=ImageTranslationTest , inputs=[VideoTestInput, VideoTestSubtitleInput], outputs="video")
- with gr.Accordion("Whisper Spaces"):
- Whisperspaceoptions = gr.Dropdown(choices=["https://sanchit-gandhi-whisper-jax-diarization.hf.space", "https://sanchit-gandhi-whisper-jax.hf.space", "https://sanchit-gandhi-whisper-large-v2.hf.space", "https://facebook-seamless-m4t.hf.space"], label="existing whisper spaces")
- Whisperspaceoptionsbtn = gr.Button("Load Whisper Space")
- WhisperspaceoptionsOut = gr.HTML()
- Whisperspaceoptionsbtn.click(fn=display_website, inputs=Whisperspaceoptions, outputs=WhisperspaceoptionsOut)
- with gr.Accordion("Image as prompt Spaces"):
-                    Imagepromptspaceoptions = gr.Dropdown(choices=["https://badayvedat-llava.hf.space", "https://xinyu1205-recognize-anything.hf.space"], label="existing image-as-prompt spaces")
- Imagepromptspaceoptionsbtn = gr.Button("Load a Image as prompt Space")
- ImagepromptspaceoptionsOut = gr.HTML()
- Imagepromptspaceoptionsbtn.click(fn=display_website, inputs=Imagepromptspaceoptions, outputs=ImagepromptspaceoptionsOut)
- with gr.Accordion("Old Ideas to consider", open=False):
- gr.HTML("Nicolai Nielsen Youtube channel - aruco markers = position --> can test using premade ones from an image search")
- gr.Textbox("Alpha Test version = Real time Lablling of All things in view using SAM and Clip Interrogator and OpenCV on pydroid --> Adjusted Demo")
- gr.HTML("Some Prompt ideas --> Prompt: Describe the place where these descriptions may be (You job is to be speculative for brainstorming purposes): A dog and a boy, the area is texas, the weather is sunny, the date is 01 May 2021 Prompt Content Ideas Ideas Clip Interrogator + Location Data aka tags for place, location and time + general news updates on the location + overview of the items in the location Location based advise is most important but after that is information observed by appliances in the location eg. Times Computer turned on, times geyser inspected, amount of time keys havent been touched etc. each location will have an ai personality that will relay more information ")
- gr.HTML(" -- RAM and Tag2Text -- | -- SAM with Clip -- ")
- with gr.Tab("Incomplete Tests and Experiments"):
- with gr.Tab("Graph Based Reading", id="1"):
- gr.Textbox('Parts of Speech based | Automating the Notetaking Tab either directly or using visual llm to use this interface efficiently')
- gr.HTML("Types of comprehension agent Speed of Comprehension = Verb comprehension From the following please extract the verbs now explain each in context Next, use picture descriptions for each word in the verb list Create combinations using the verb list ")
- gr.HTML("How VERBS RELATE TO EACH OTHER --> Shared Nodes - what other verbs are connected to the noun in a INFRANODUS With POS Tag filters")
- gr.HTML("Tree and Branches approach to learning = familiarity with keywords/headings/summaries before reading the whole text Productivity/Work revolves around repitition which can be found looking for plurals and grouping terms eg. Headings and Hyper/Hyponyms Analysis")
- gr.HTML("Sentence to PyDot graph")
- gr.HTML("Currently a bug that locks all buttons in the space when you use this above example - Reload to fix")
- with gr.Tab("Random Ideas"):
- gr.HTML("""
Spaces Test - still under construction --> Next milestone is turning this interface handsfree | Knowledge is a language, but productive knowledge is find-replace as well | LingQ is a good option for per-word state management
Arrows app json creator for easy knowledge graphing and spacy POS graph? --> Questions? -->
-
ChatGPT turns learning into a read-only-what-you-don't-know, ask-only-what-you-don't-know feedback loop --> All you have to do is keep track of what prompts you have asked in the past
""")
- gr.HTML("
Target 0: Mnemonics as title of images --> Comprehensible input Target 1: Dual audio at word Level while using repitition to train random recall --> Word level Time Target 2: Video --> Split by sentence --> each word repeated (60) + each phrase (10) + each sentence (10) --> TTS file for practice --> State Management/Known word Tracker ----------------------- The trick is minimum one minute of focus on a new word --> Listening is hard because there are new word within seconds and you need repeated focus on each to learn
Audio = best long form attention mechanism AS it is ANTICIPATION (Awareness of something before it happens like knowing song Lyrics) FOCUSED - Attention (Focused Repitition) + Exposure (Random Repitition)
Listening is hard due to different word order and word combinations (collocations more important than single words)
")
- gr.HTML("Predictable to identify the parts of picture being described --> The description moves in one direction from one side of the image to the other side is easiest ")
- gr.HTML("Image = instant comprehension like Stable Diffusion --> Audiovisual experience is the most optimal reading experience Manga with summary descriptions for the chapters = Most aligned visual to audio experience")
- with gr.Tab("AI Tools, Prompts and games"):
- gr.HTML("TODO = Llama-cpp-python with falcon 7b / openllama 7b intergrated into each of the interfaces in this space aka --> interfaces as tools for open source llm
Test using gradio space/interfaces through the api as function calls for gpt3.5 and 4")
- with gr.Accordion('Command Based Tools - Instant verification of ability to describe'):
- gr.HTML("Roblox - -- Roblox Assistant -- | ")
- #with gr.Tab("Gradio Client Tests"):
- # gr.HTML("How to return componets here in gradio (as each client interface needs different inputs) like in react")
- with gr.Tab("Current Ideas to edit old sections"):
- gr.HTML("The core themes = scheduling (randomisation and calendar marking), speed practice, visualisation, and audio, repitition, compression and finally Tracking and only learning the unknown")
- gr.HTML("Parts that are already done - Repition and scheduling (randomisation) on the sidebar, compresion using the acronym tab, Audio in the beginning tab, unknown partially in HTML creator")
- gr.HTML("Parts that are not done - Visualisation (of acronyms / duo word sets / nouns and verbs) - The image placeholder creator script, Tracking (private = database, public = textfile export), calendar based scheduling aka alert based ")
- gr.HTML("React Version of the app can combine all of these use cases into one component - so far tracking, placeholder and partially scheduling have been done")
-                    gr.Label('True speed simultaneous - which is a boolean state = practice at simultaneous to get simultaneous |||| Another way to be fast is to practice simultaneously with the various SOVs i.e. when you read a noun the verb must appear immediately and vice versa |||| Simultaneous spelling is the other way to practice |||| The main goal of all reading is that next time you read you take less time than this time |||| Speed = ability to anticipate the next word |||| Anticipation of a sentence = POV ')
- with gr.Tab("Text to image for only nouns "):
- gr.Label("Placeholder for the transformers code Generator that can be used by anyone with gpu to turn all nouns in their text to pictures (The lambda labs code)")
- with gr.Tab("Simultanoues Practice Zone"):
- gr.Label("Audio based space where you must look at the corresponding text for the audio thats playing as simultaneous practice")
- gr.DataFrame(None, headers=["text", "audio"], label="Add text pairs to practice", interactive=True)
- gr.HTML("Below you can create and listen to the audio")
-                    gr.Interface(fn=SimultaneousSpellingPrac, inputs=["text"], outputs=["text"], title="Simultaneous spelling order fast practice --> 1 letter a word = fastest read")
-                    gr.Interface(fn=FirstLetterSummary, inputs=["text"], outputs=["text"], title="First-letter order fast practice --> 1 letter a word = fastest read")
- gr.Interface(fn=imagebasedreading, inputs=["text"], outputs=["file", "html", "text"], title="Placeholder for every newline")
- with gr.Tab("Long Text Analysis"):
- gr.Interface(fn=LoadNLTKUDHRText, inputs=NLTKudhr, outputs=["text", "textarea"])
- gr.HTML("For Long text searches are useful under time pressure and also bring all direct interactions with search terms - a word is defined by those around it")
-                    gr.Interface(fn=onlyplurals, inputs=["text"], outputs=["text"], title="Only plurals = optimal concepts to learn first as work = repetition")
- gr.Label("Placeholder for old code for concordance and word counting in other test space")
- with gr.Tab("Video Segmentation with OpenCV Test"):
- gr.Interface(fn=segment_video_with_opencv, inputs=VideoSplitTestInput, outputs="fileexplorer")
- with gr.Tab("State Management and Education"):
- gr.HTML("Education = Learning things you didnt know yesterday and not forgetting more than you learn
What you didnt know forms = Glossary Lists Formulas graphs Procedures
for each you will need a seperate way to track the progress but amount of times + recency = approximate state ")
-
-
-lliface.queue().launch(share=True)  # (inbrowser=True)
\ No newline at end of file
diff --git a/spaces/KyanChen/RSPrompter/configs/rsprompter/samdet_fasterrcnn_whu_config.py b/spaces/KyanChen/RSPrompter/configs/rsprompter/samdet_fasterrcnn_whu_config.py
deleted file mode 100644
index a4d9123581ac61c511c160cebbea6ecb0cafdf30..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/configs/rsprompter/samdet_fasterrcnn_whu_config.py
+++ /dev/null
@@ -1,345 +0,0 @@
-custom_imports = dict(imports=['mmseg.datasets', 'mmseg.models'], allow_failed_imports=False)
-
-sub_model_train = [
- 'whole_model'
-]
-
-sub_model_optim = {
- 'whole_model': {'lr_mult': 1},
-}
-
-max_epochs = 100
-
-optimizer = dict(
- type='AdamW',
- sub_model=sub_model_optim,
- lr=0.0001,
- weight_decay=1e-3
-)
-
-param_scheduler = [
- # warm up learning rate scheduler
- dict(
- type='LinearLR',
- start_factor=1e-4,
- by_epoch=True,
- begin=0,
- end=1,
- # update by iter
- convert_to_iter_based=True),
- # main learning rate scheduler
- dict(
- type='CosineAnnealingLR',
- T_max=max_epochs,
- by_epoch=True,
- begin=1,
- end=max_epochs,
- ),
-]
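-# Schedule sketch: the LR warms up linearly from 1e-4 x base LR to the base LR
-# over epoch 0 -> 1 (stepped per iteration), then follows a cosine decay from
-# epoch 1 until max_epochs.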
-
-param_scheduler_callback = dict(
- type='ParamSchedulerHook'
-)
-
-evaluator_ = dict(
- type='CocoPLMetric',
- metric=['bbox', 'segm'],
- proposal_nums=[1, 10, 100]
-)
-
-evaluator = dict(
- # train_evaluator=evaluator_,
- val_evaluator=evaluator_,
-)
-
-
-image_size = (1024, 1024)
-
-data_preprocessor = dict(
- type='mmdet.DetDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True,
- pad_size_divisor=32,
- pad_mask=True,
- mask_pad_value=0,
-)
-
-num_things_classes = 1
-num_stuff_classes = 0
-num_classes = num_things_classes + num_stuff_classes
-
-model = dict(
- type='mmdet.FasterRCNN',
- data_preprocessor=data_preprocessor,
- backbone=dict(
- type='mmdet.ResNet',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch',
- init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
- neck=dict(
- type='mmdet.FPN',
- in_channels=[256, 512, 1024, 2048],
- out_channels=256,
- num_outs=5),
- rpn_head=dict(
- type='mmdet.RPNHead',
- in_channels=256,
- feat_channels=256,
- anchor_generator=dict(
- type='mmdet.AnchorGenerator',
- scales=[8],
- ratios=[0.5, 1.0, 2.0],
- strides=[4, 8, 16, 32, 64]),
- bbox_coder=dict(
- type='mmdet.DeltaXYWHBBoxCoder',
- target_means=[.0, .0, .0, .0],
- target_stds=[1.0, 1.0, 1.0, 1.0]),
- loss_cls=dict(
- type='mmdet.CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox=dict(type='mmdet.L1Loss', loss_weight=1.0)),
- roi_head=dict(
- type='mmdet.StandardRoIHead',
- bbox_roi_extractor=dict(
- type='mmdet.SingleRoIExtractor',
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32]),
- bbox_head=dict(
- type='mmdet.Shared2FCBBoxHead',
- in_channels=256,
- fc_out_channels=1024,
- roi_feat_size=7,
-            num_classes=num_classes,  # one 'building' class, defined above
- bbox_coder=dict(
- type='mmdet.DeltaXYWHBBoxCoder',
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- reg_class_agnostic=False,
- loss_cls=dict(
- type='mmdet.CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox=dict(type='mmdet.L1Loss', loss_weight=1.0))),
- # model training and testing settings
- train_cfg=dict(
- rpn=dict(
- assigner=dict(
- type='mmdet.MaxIoUAssigner',
- pos_iou_thr=0.7,
- neg_iou_thr=0.3,
- min_pos_iou=0.3,
- match_low_quality=True,
- ignore_iof_thr=-1),
- sampler=dict(
- type='mmdet.RandomSampler',
- num=256,
- pos_fraction=0.5,
- neg_pos_ub=-1,
- add_gt_as_proposals=False),
- allowed_border=-1,
- pos_weight=-1,
- debug=False),
- rpn_proposal=dict(
- nms_pre=2000,
- max_per_img=1000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=dict(
- assigner=dict(
- type='mmdet.MaxIoUAssigner',
- pos_iou_thr=0.5,
- neg_iou_thr=0.5,
- min_pos_iou=0.5,
- match_low_quality=False,
- ignore_iof_thr=-1),
- sampler=dict(
- type='mmdet.RandomSampler',
- num=512,
- pos_fraction=0.25,
- neg_pos_ub=-1,
- add_gt_as_proposals=True),
- pos_weight=-1,
- debug=False)),
- test_cfg=dict(
- rpn=dict(
- nms_pre=1000,
- max_per_img=1000,
- nms=dict(type='nms', iou_threshold=0.7),
- min_bbox_size=0),
- rcnn=dict(
- score_thr=0.05,
- nms=dict(type='nms', iou_threshold=0.5),
- max_per_img=100)
- # soft-nms is also supported for rcnn testing
- # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
- ))
-
-model_cfg = dict(
- type='SegSAMDetPLer',
- hyperparameters=dict(
- optimizer=optimizer,
- param_scheduler=param_scheduler,
- evaluator=evaluator,
- ),
- need_train_names=sub_model_train,
- whole_model=model,
- backbone=dict(
- type='vit_h',
- checkpoint='pretrain/sam/sam_vit_h_4b8939.pth',
- # type='vit_b',
- # checkpoint='pretrain/sam/sam_vit_b_01ec64.pth',
- )
-)
-
-task_name = 'whu_ins'
-exp_name = 'E20230602_3'
-logger = dict(
- type='WandbLogger',
- project=task_name,
- group='samdet',
- name=exp_name
-)
-# logger = None
-
-callbacks = [
- param_scheduler_callback,
- dict(
- type='ModelCheckpoint',
- dirpath=f'results/{task_name}/{exp_name}/checkpoints',
- save_last=True,
- mode='max',
- monitor='valsegm_map_0',
- save_top_k=2,
- filename='epoch_{epoch}-map_{valsegm_map_0:.4f}'
- ),
- dict(
- type='LearningRateMonitor',
- logging_interval='step'
- )
-]
-
-
-trainer_cfg = dict(
- compiled_model=False,
- accelerator="auto",
- # strategy="auto",
- # strategy="ddp",
- strategy='ddp_find_unused_parameters_true',
- # precision='32',
- # precision='16-mixed',
- devices=8,
- default_root_dir=f'results/{task_name}/{exp_name}',
- # default_root_dir='results/tmp',
- max_epochs=max_epochs,
- logger=logger,
- callbacks=callbacks,
- log_every_n_steps=20,
- check_val_every_n_epoch=3,
- benchmark=True,
- # sync_batchnorm=True,
- # fast_dev_run=True,
-
- # limit_train_batches=1,
- # limit_val_batches=0,
- # limit_test_batches=None,
- # limit_predict_batches=None,
- # overfit_batches=0.0,
-
- # val_check_interval=None,
- # num_sanity_val_steps=0,
- # enable_checkpointing=None,
- # enable_progress_bar=None,
- # enable_model_summary=None,
- # accumulate_grad_batches=32,
- # gradient_clip_val=15,
- # gradient_clip_algorithm='norm',
- # deterministic=None,
- # inference_mode: bool=True,
- use_distributed_sampler=True,
- # profiler="simple",
- # detect_anomaly=False,
- # barebones=False,
- # plugins=None,
- # reload_dataloaders_every_n_epochs=0,
-)
-
-
-backend_args = None
-train_pipeline = [
- dict(type='mmdet.LoadImageFromFile'),
- dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True),
- dict(type='mmdet.Resize', scale=image_size),
- dict(type='mmdet.RandomFlip', prob=0.5),
- dict(type='mmdet.PackDetInputs')
-]
-
-test_pipeline = [
- dict(type='mmdet.LoadImageFromFile', backend_args=backend_args),
- dict(type='mmdet.Resize', scale=image_size),
- # If you don't have a gt annotation, delete the pipeline
- dict(type='mmdet.LoadAnnotations', with_bbox=True, with_mask=True),
- dict(
- type='mmdet.PackDetInputs',
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
- 'scale_factor'))
-]
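-# Pipeline sketch: train adds a random horizontal flip at p=0.5; test keeps the
-# GT annotations only so the CocoPLMetric above can score bbox/segm mAP.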
-
-
-train_batch_size_per_gpu = 4
-train_num_workers = 4
-test_batch_size_per_gpu = 4
-test_num_workers = 4
-persistent_workers = True
-
-data_parent = '/mnt/search01/dataset/cky_data/WHU'
-train_data_prefix = 'train/'
-val_data_prefix = 'test/'
-dataset_type = 'WHUInsSegDataset'
-
-val_loader = dict(
- batch_size=test_batch_size_per_gpu,
- num_workers=test_num_workers,
- persistent_workers=persistent_workers,
- pin_memory=True,
- dataset=dict(
- type=dataset_type,
- data_root=data_parent,
- # ann_file='NWPU_instances_val.json',
- # data_prefix=dict(img_path='positive image set'),
- # ann_file='annotations/SSDD_instances_val.json',
- # data_prefix=dict(img_path='imgs'),
- ann_file='annotations/WHU_building_test.json',
- data_prefix=dict(img_path=val_data_prefix + '/image'),
- test_mode=True,
- filter_cfg=dict(filter_empty_gt=True, min_size=32),
- pipeline=test_pipeline,
- backend_args=backend_args))
-
-datamodule_cfg = dict(
- type='PLDataModule',
- train_loader=dict(
- batch_size=train_batch_size_per_gpu,
- num_workers=train_num_workers,
- persistent_workers=persistent_workers,
- pin_memory=True,
- dataset=dict(
- type=dataset_type,
- data_root=data_parent,
- # ann_file='NWPU_instances_train.json',
- # data_prefix=dict(img_path='positive image set'),
- # ann_file='annotations/SSDD_instances_train.json',
- # data_prefix=dict(img_path='imgs'),
- ann_file='annotations/WHU_building_train.json',
- data_prefix=dict(img_path=train_data_prefix + '/image'),
- filter_cfg=dict(filter_empty_gt=True, min_size=32),
- pipeline=train_pipeline,
- backend_args=backend_args)
- ),
- val_loader=val_loader,
- # test_loader=val_loader
- predict_loader=val_loader
-)
\ No newline at end of file
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/mobilenet_v2.py b/spaces/KyanChen/RSPrompter/mmdet/models/backbones/mobilenet_v2.py
deleted file mode 100644
index a4fd0519ad4d5106e1acb82624d6393052596ce8..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/backbones/mobilenet_v2.py
+++ /dev/null
@@ -1,198 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-
-import torch.nn as nn
-from mmcv.cnn import ConvModule
-from mmengine.model import BaseModule
-from torch.nn.modules.batchnorm import _BatchNorm
-
-from mmdet.registry import MODELS
-from ..layers import InvertedResidual
-from ..utils import make_divisible
-
-
-@MODELS.register_module()
-class MobileNetV2(BaseModule):
- """MobileNetV2 backbone.
-
- Args:
- widen_factor (float): Width multiplier, multiply number of
- channels in each layer by this amount. Default: 1.0.
- out_indices (Sequence[int], optional): Output from which stages.
- Default: (1, 2, 4, 7).
- frozen_stages (int): Stages to be frozen (all param fixed).
- Default: -1, which means not freezing any parameters.
- conv_cfg (dict, optional): Config dict for convolution layer.
- Default: None, which means using conv2d.
- norm_cfg (dict): Config dict for normalization layer.
- Default: dict(type='BN').
- act_cfg (dict): Config dict for activation layer.
- Default: dict(type='ReLU6').
- norm_eval (bool): Whether to set norm layers to eval mode, namely,
- freeze running stats (mean and var). Note: Effect on Batch Norm
- and its variants only. Default: False.
- with_cp (bool): Use checkpoint or not. Using checkpoint will save some
- memory while slowing down the training speed. Default: False.
- pretrained (str, optional): model pretrained path. Default: None
- init_cfg (dict or list[dict], optional): Initialization config dict.
- Default: None
- """
-
- # Parameters to build layers. 4 parameters are needed to construct a
- # layer, from left to right: expand_ratio, channel, num_blocks, stride.
- arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
- [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
- [6, 320, 1, 1]]
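-    # e.g. with widen_factor=1.0 the stem has make_divisible(32 * 1.0, 8) = 32
-    # channels; widen_factor=0.5 would give make_divisible(16, 8) = 16.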
-
- def __init__(self,
- widen_factor=1.,
- out_indices=(1, 2, 4, 7),
- frozen_stages=-1,
- conv_cfg=None,
- norm_cfg=dict(type='BN'),
- act_cfg=dict(type='ReLU6'),
- norm_eval=False,
- with_cp=False,
- pretrained=None,
- init_cfg=None):
- super(MobileNetV2, self).__init__(init_cfg)
-
- self.pretrained = pretrained
- assert not (init_cfg and pretrained), \
- 'init_cfg and pretrained cannot be specified at the same time'
- if isinstance(pretrained, str):
- warnings.warn('DeprecationWarning: pretrained is deprecated, '
- 'please use "init_cfg" instead')
- self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
- elif pretrained is None:
- if init_cfg is None:
- self.init_cfg = [
- dict(type='Kaiming', layer='Conv2d'),
- dict(
- type='Constant',
- val=1,
- layer=['_BatchNorm', 'GroupNorm'])
- ]
- else:
- raise TypeError('pretrained must be a str or None')
-
- self.widen_factor = widen_factor
- self.out_indices = out_indices
- if not set(out_indices).issubset(set(range(0, 8))):
- raise ValueError('out_indices must be a subset of range'
- f'(0, 8). But received {out_indices}')
-
- if frozen_stages not in range(-1, 8):
- raise ValueError('frozen_stages must be in range(-1, 8). '
- f'But received {frozen_stages}')
- self.frozen_stages = frozen_stages
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
- self.norm_eval = norm_eval
- self.with_cp = with_cp
-
- self.in_channels = make_divisible(32 * widen_factor, 8)
-
- self.conv1 = ConvModule(
- in_channels=3,
- out_channels=self.in_channels,
- kernel_size=3,
- stride=2,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- self.layers = []
-
- for i, layer_cfg in enumerate(self.arch_settings):
- expand_ratio, channel, num_blocks, stride = layer_cfg
- out_channels = make_divisible(channel * widen_factor, 8)
- inverted_res_layer = self.make_layer(
- out_channels=out_channels,
- num_blocks=num_blocks,
- stride=stride,
- expand_ratio=expand_ratio)
- layer_name = f'layer{i + 1}'
- self.add_module(layer_name, inverted_res_layer)
- self.layers.append(layer_name)
-
- if widen_factor > 1.0:
- self.out_channel = int(1280 * widen_factor)
- else:
- self.out_channel = 1280
-
- layer = ConvModule(
- in_channels=self.in_channels,
- out_channels=self.out_channel,
- kernel_size=1,
- stride=1,
- padding=0,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.add_module('conv2', layer)
- self.layers.append('conv2')
-
- def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
- """Stack InvertedResidual blocks to build a layer for MobileNetV2.
-
- Args:
- out_channels (int): out_channels of block.
- num_blocks (int): number of blocks.
- stride (int): stride of the first block. Default: 1
- expand_ratio (int): Expand the number of channels of the
- hidden layer in InvertedResidual by this ratio. Default: 6.
- """
- layers = []
- for i in range(num_blocks):
- if i >= 1:
- stride = 1
- layers.append(
- InvertedResidual(
- self.in_channels,
- out_channels,
- mid_channels=int(round(self.in_channels * expand_ratio)),
- stride=stride,
- with_expand_conv=expand_ratio != 1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg,
- with_cp=self.with_cp))
- self.in_channels = out_channels
-
- return nn.Sequential(*layers)
-
- def _freeze_stages(self):
- if self.frozen_stages >= 0:
- for param in self.conv1.parameters():
- param.requires_grad = False
- for i in range(1, self.frozen_stages + 1):
- layer = getattr(self, f'layer{i}')
- layer.eval()
- for param in layer.parameters():
- param.requires_grad = False
-
- def forward(self, x):
- """Forward function."""
- x = self.conv1(x)
- outs = []
- for i, layer_name in enumerate(self.layers):
- layer = getattr(self, layer_name)
- x = layer(x)
- if i in self.out_indices:
- outs.append(x)
- return tuple(outs)
-
- def train(self, mode=True):
- """Convert the model into training mode while keep normalization layer
- frozen."""
- super(MobileNetV2, self).train(mode)
- self._freeze_stages()
- if mode and self.norm_eval:
- for m in self.modules():
-                # trick: eval() only has an effect on BatchNorm layers
- if isinstance(m, _BatchNorm):
- m.eval()
diff --git a/spaces/LanQian/ChatChuanHu/utils.py b/spaces/LanQian/ChatChuanHu/utils.py
deleted file mode 100644
index 96165bc58aff5d820af42e1724af27435c471fb8..0000000000000000000000000000000000000000
--- a/spaces/LanQian/ChatChuanHu/utils.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# -*- coding:utf-8 -*-
-from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
-import logging
-import json
-import gradio as gr
-# import openai
-import os
-import traceback
-import requests
-# import markdown
-import csv
-import mdtex2html
-from pypinyin import lazy_pinyin
-from presets import *
-import tiktoken
-from tqdm import tqdm
-import colorama
-from duckduckgo_search import ddg
-import datetime
-
-# logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s")
-
-if TYPE_CHECKING:
- from typing import TypedDict
-
- class DataframeData(TypedDict):
- headers: List[str]
- data: List[List[str | int | bool]]
-
-initial_prompt = "You are a helpful assistant."
-API_URL = "https://api.openai.com/v1/chat/completions"
-HISTORY_DIR = "history"
-TEMPLATES_DIR = "templates"
-
-def postprocess(
- self, y: List[Tuple[str | None, str | None]]
- ) -> List[Tuple[str | None, str | None]]:
- """
- Parameters:
- y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
- Returns:
- List of tuples representing the message and response. Each message and response will be a string of HTML.
- """
- if y is None:
- return []
- for i, (message, response) in enumerate(y):
- y[i] = (
- # None if message is None else markdown.markdown(message),
- # None if response is None else markdown.markdown(response),
-            None if message is None else mdtex2html.convert(message),
- None if response is None else mdtex2html.convert(response),
- )
- return y
-
-def count_token(input_str):
- encoding = tiktoken.get_encoding("cl100k_base")
- length = len(encoding.encode(input_str))
- return length
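-
-# Illustrative: count_token("hello world") == 2 under the cl100k_base encoding
-# ("hello" + " world"); useful for budgeting a model's context window.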
-
-def parse_text(text):
- lines = text.split("\n")
- lines = [line for line in lines if line != ""]
- count = 0
- for i, line in enumerate(lines):
- if "```" in line:
- count += 1
- items = line.split('`')
- if count % 2 == 1:
-                lines[i] = f'<pre><code class="language-{items[-1]}">'
-
-**Notice**: The output is generated by a top-k sampling scheme and may involve some randomness. For multiple images and videos, we cannot guarantee performance, since only image-text / video-text pairs were used during training.
-
-**We recommend only one image or video per conversation session.** If you want to start chatting about new images or videos, we recommend that you **CLEAR** the history and restart.
-
-""")
-
-tos_markdown = ("""
-### Terms of use
-By using this service, users are required to agree to the following terms:
-The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research.
-Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator.
-For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality.
-
-**Copyright 2023 Alibaba DAMO Academy.**
-""")
-
-learn_more_markdown = ("""
-### License
-The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation.
-""")
-
-css = code_highlight_css + """
-pre {
- white-space: pre-wrap; /* Since CSS 2.1 */
- white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
- white-space: -pre-wrap; /* Opera 4-6 */
- white-space: -o-pre-wrap; /* Opera 7 */
- word-wrap: break-word; /* Internet Explorer 5.5+ */
-}
-"""
-
-def build_demo():
- # with gr.Blocks(title="mPLUG-Owl🦉", theme=gr.themes.Base(), css=css) as demo:
- with gr.Blocks(title="mPLUG-Owl🦉", css=css) as demo:
- state = gr.State()
- gr.Markdown(SHARED_UI_WARNING)
-
- gr.Markdown(title_markdown)
-
- with gr.Row():
- with gr.Column(scale=3):
-
- imagebox = gr.Image(type="pil")
- videobox = gr.Video()
-
- with gr.Accordion("Parameters", open=True, visible=False) as parameter_row:
- max_output_tokens = gr.Slider(minimum=0, maximum=1024, value=512, step=64, interactive=True, label="Max output tokens",)
- temperature = gr.Slider(minimum=0, maximum=1, value=1, step=0.1, interactive=True, label="Temperature",)
- top_k = gr.Slider(minimum=1, maximum=5, value=3, step=1, interactive=True, label="Top K",)
- top_p = gr.Slider(minimum=0, maximum=1, value=0.9, step=0.1, interactive=True, label="Top p",)
- length_penalty = gr.Slider(minimum=1, maximum=5, value=1, step=0.1, interactive=True, label="length_penalty",)
- num_beams = gr.Slider(minimum=1, maximum=5, value=1, step=1, interactive=True, label="Beam Size",)
- no_repeat_ngram_size = gr.Slider(minimum=1, maximum=5, value=2, step=1, interactive=True, label="no_repeat_ngram_size",)
- num_frames = gr.Slider(minimum=8, maximum=32, value=8, step=4, interactive=True, label="Number of Frames",)
- do_sample = gr.Checkbox(interactive=True, value=True, label="do_sample")
-
- gr.Markdown(tos_markdown)
-
- with gr.Column(scale=6):
- chatbot = grChatbot(elem_id="chatbot", visible=False).style(height=1000)
- with gr.Row():
- with gr.Column(scale=8):
- textbox = gr.Textbox(show_label=False,
- placeholder="Enter text and press ENTER", visible=False).style(container=False)
- with gr.Column(scale=1, min_width=60):
- submit_btn = gr.Button(value="Submit", visible=False)
- with gr.Row(visible=False) as button_row:
- upvote_btn = gr.Button(value="👍 Upvote", interactive=False)
- downvote_btn = gr.Button(value="👎 Downvote", interactive=False)
- flag_btn = gr.Button(value="⚠️ Flag", interactive=False)
- regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)
- clear_btn = gr.Button(value="🗑️ Clear history", interactive=False)
-
- gr.Examples(examples=[
- [f"examples/monday.jpg", "Explain why this meme is funny."],
- [f'examples/rap.jpeg', 'Can you write me a master rap song that rhymes very well based on this image?'],
- [f'examples/titanic.jpeg', 'What happened at the end of this movie?'],
- [f'examples/vga.jpeg', 'What is funny about this image? Describe it panel by panel.'],
- [f'examples/mug_ad.jpeg', 'We design new mugs shown in the image. Can you help us write an advertisement?'],
- [f'examples/laundry.jpeg', 'Why this happens and how to fix it?'],
- [f'examples/ca.jpeg', "What do you think about the person's behavior?"],
- [f'examples/monalisa-fun.jpg', 'Do you know who drew this painting?'],
- ], inputs=[imagebox, textbox])
-
- gr.Markdown(learn_more_markdown)
- url_params = gr.JSON(visible=False)
-
- btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn]
- parameter_list = [
- num_frames, max_output_tokens, temperature, top_k, top_p,
- num_beams, no_repeat_ngram_size, length_penalty,
- do_sample
- ]
- upvote_btn.click(upvote_last_response,
- [state], [textbox, upvote_btn, downvote_btn, flag_btn])
- downvote_btn.click(downvote_last_response,
- [state], [textbox, upvote_btn, downvote_btn, flag_btn])
- flag_btn.click(flag_last_response,
- [state], [textbox, upvote_btn, downvote_btn, flag_btn])
- # regenerate_btn.click(regenerate, state,
- # [state, chatbot, textbox, imagebox, videobox] + btn_list).then(
- # http_bot, [state] + parameter_list,
- # [state, chatbot] + btn_list)
- regenerate_btn.click(regenerate_http_bot, [state] + parameter_list,
- [state, chatbot, textbox, imagebox, videobox] + btn_list)
-
- clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox, videobox] + btn_list)
-
- # textbox.submit(add_text, [state, textbox, imagebox, videobox], [state, chatbot, textbox, imagebox, videobox] + btn_list
- # ).then(http_bot, [state] + parameter_list,
- # [state, chatbot] + btn_list)
- # submit_btn.click(add_text, [state, textbox, imagebox, videobox], [state, chatbot, textbox, imagebox, videobox] + btn_list
- # ).then(http_bot, [state] + parameter_list,
- # [state, chatbot] + btn_list)
-
- textbox.submit(add_text_http_bot,
- [state, textbox, imagebox, videobox] + parameter_list,
- [state, chatbot, textbox, imagebox, videobox] + btn_list
- )
-
- submit_btn.click(add_text_http_bot,
- [state, textbox, imagebox, videobox] + parameter_list,
- [state, chatbot, textbox, imagebox, videobox] + btn_list
- )
-
- demo.load(load_demo, [url_params], [state,
- chatbot, textbox, submit_btn, button_row, parameter_row],
- _js=get_window_url_params)
-
- return demo
-
-if __name__ == "__main__":
- io = init()
-
- parser = argparse.ArgumentParser()
- parser.add_argument("--host", type=str, default="0.0.0.0")
- parser.add_argument("--debug", action="store_true", help="using debug mode")
- parser.add_argument("--port", type=int)
- parser.add_argument("--concurrency-count", type=int, default=1)
- parser.add_argument("--base-model",type=str, default='./')
- parser.add_argument("--load-8bit", action="store_true", help="using 8bit mode")
- parser.add_argument("--bf16", action="store_true", default=True, help="using 8bit mode")
- args = parser.parse_args()
-
- if torch.cuda.is_available():
- device = "cuda"
- else:
- device = "cpu"
-
- model = mPLUG_Owl_Server(
- base_model=args.base_model,
- load_in_8bit=args.load_8bit,
- bf16=args.bf16,
- device=device,
- io=io
- )
- demo = build_demo()
- demo.queue(concurrency_count=args.concurrency_count, status_update_rate=10, api_open=False).launch(server_name=args.host, debug=args.debug, server_port=args.port, share=False)
-
diff --git a/spaces/MAGAer13/mPLUG-Owl2/mplug_owl2/model/builder.py b/spaces/MAGAer13/mPLUG-Owl2/mplug_owl2/model/builder.py
deleted file mode 100644
index d3bd0046249aac2f315840b875924ae0d7eba22f..0000000000000000000000000000000000000000
--- a/spaces/MAGAer13/mPLUG-Owl2/mplug_owl2/model/builder.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright 2023 Haotian Liu
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import os
-import warnings
-import shutil
-
-from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, BitsAndBytesConfig
-from transformers.models.clip.image_processing_clip import CLIPImageProcessor
-import torch
-from mplug_owl2.model import *
-from icecream import ic
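-
-# Illustrative usage sketch (the checkpoint path below is a placeholder, not
-# pinned by this file):
-#   tokenizer, model, image_processor, context_len = load_pretrained_model(
-#       model_path='/path/to/mplug-owl2-checkpoint', model_base=None,
-#       model_name='mplug_owl2', load_8bit=False)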
-def load_pretrained_model(model_path, model_base, model_name, load_8bit=False, load_4bit=False, device_map="auto", device="cuda"):
- kwargs = {"device_map": device_map}
-
- if device != "cuda":
- kwargs['device_map'] = {"": device}
-
- if load_8bit:
- kwargs['load_in_8bit'] = True
- elif load_4bit:
- kwargs['load_in_4bit'] = True
- kwargs['quantization_config'] = BitsAndBytesConfig(
- load_in_4bit=True,
- bnb_4bit_compute_dtype=torch.float16,
- bnb_4bit_use_double_quant=True,
- bnb_4bit_quant_type='nf4'
- )
- else:
- kwargs['torch_dtype'] = torch.float16
- if 'mplug_owl2' in model_name.lower():
-        # Load mPLUG-Owl2 model (this loader is adapted from LLaVA)
- if 'lora' in model_name.lower() and model_base is None:
- warnings.warn('There is `lora` in model name but no `model_base` is provided. If you are loading a LoRA model, please provide the `model_base` argument. Detailed instruction: https://github.com/haotian-liu/LLaVA#launch-a-model-worker-lora-weights-unmerged.')
- if 'lora' in model_name.lower() and model_base is not None:
- lora_cfg_pretrained = AutoConfig.from_pretrained(model_path)
- tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
- print('Loading mPLUG-Owl2 from base model...')
- model = MPLUGOwl2LlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=lora_cfg_pretrained, **kwargs)
-            token_num, token_dim = model.lm_head.out_features, model.lm_head.in_features
-            if model.lm_head.weight.shape[0] != token_num:
-                model.lm_head.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
-                model.model.embed_tokens.weight = torch.nn.Parameter(torch.empty(token_num, token_dim, device=model.device, dtype=model.dtype))
-
- print('Loading additional mPLUG-Owl2 weights...')
- if os.path.exists(os.path.join(model_path, 'non_lora_trainables.bin')):
- non_lora_trainables = torch.load(os.path.join(model_path, 'non_lora_trainables.bin'), map_location='cpu')
- else:
- # this is probably from HF Hub
- from huggingface_hub import hf_hub_download
- def load_from_hf(repo_id, filename, subfolder=None):
- cache_file = hf_hub_download(
- repo_id=repo_id,
- filename=filename,
- subfolder=subfolder)
- return torch.load(cache_file, map_location='cpu')
- non_lora_trainables = load_from_hf(model_path, 'non_lora_trainables.bin')
- non_lora_trainables = {(k[11:] if k.startswith('base_model.') else k): v for k, v in non_lora_trainables.items()}
- if any(k.startswith('model.model.') for k in non_lora_trainables):
- non_lora_trainables = {(k[6:] if k.startswith('model.') else k): v for k, v in non_lora_trainables.items()}
- model.load_state_dict(non_lora_trainables, strict=False)
-
- from peft import PeftModel
- print('Loading LoRA weights...')
- model = PeftModel.from_pretrained(model, model_path)
- print('Merging LoRA weights...')
- model = model.merge_and_unload()
- print('Model is loaded...')
- elif model_base is not None:
- # this may be mm projector only
- print('Loading mPLUG-Owl2 from base model...')
- tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
- cfg_pretrained = AutoConfig.from_pretrained(model_path)
- model = MPLUGOwl2LlamaForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, config=cfg_pretrained, **kwargs)
- else:
- tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
- model = MPLUGOwl2LlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
- else:
- # Load language model
- if model_base is not None:
- # PEFT model
- from peft import PeftModel
- tokenizer = AutoTokenizer.from_pretrained(model_base, use_fast=False)
- model = AutoModelForCausalLM.from_pretrained(model_base, low_cpu_mem_usage=True, **kwargs)
- print(f"Loading LoRA weights from {model_path}")
- model = PeftModel.from_pretrained(model, model_path)
- print(f"Merging weights")
- model = model.merge_and_unload()
- print('Convert to FP16...')
- model.to(torch.float16)
- else:
-            tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
- model = AutoModelForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True, **kwargs)
-
-
- vision_tower = model.get_model().vision_model
- vision_tower.to(device=device, dtype=torch.float16)
- image_processor = CLIPImageProcessor.from_pretrained(model_path)
-
- if hasattr(model.config, "max_sequence_length"):
- context_len = model.config.max_sequence_length
- else:
- context_len = 2048
-
- return tokenizer, model, image_processor, context_len
\ No newline at end of file
diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/util/util.py b/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/util/util.py
deleted file mode 100644
index e18b4a26082449977b27a4c1506649a2447988b1..0000000000000000000000000000000000000000
--- a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/util/util.py
+++ /dev/null
@@ -1,210 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import re
-import importlib
-import torch
-from argparse import Namespace
-import numpy as np
-from PIL import Image
-import os
-import argparse
-import dill as pickle
-
-
-def save_obj(obj, name):
- with open(name, "wb") as f:
- pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
-
-
-def load_obj(name):
- with open(name, "rb") as f:
- return pickle.load(f)
-
-
-def copyconf(default_opt, **kwargs):
- conf = argparse.Namespace(**vars(default_opt))
- for key in kwargs:
- print(key, kwargs[key])
- setattr(conf, key, kwargs[key])
- return conf
-
-
-# Converts a Tensor into a Numpy array
-# |imtype|: the desired type of the converted numpy array
-def tensor2im(image_tensor, imtype=np.uint8, normalize=True, tile=False):
- if isinstance(image_tensor, list):
- image_numpy = []
- for i in range(len(image_tensor)):
- image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
- return image_numpy
-
- if image_tensor.dim() == 4:
- # transform each image in the batch
- images_np = []
- for b in range(image_tensor.size(0)):
- one_image = image_tensor[b]
- one_image_np = tensor2im(one_image)
- images_np.append(one_image_np.reshape(1, *one_image_np.shape))
- images_np = np.concatenate(images_np, axis=0)
-
- return images_np
-
- if image_tensor.dim() == 2:
- image_tensor = image_tensor.unsqueeze(0)
- image_numpy = image_tensor.detach().cpu().float().numpy()
- if normalize:
- image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
- else:
- image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
- image_numpy = np.clip(image_numpy, 0, 255)
- if image_numpy.shape[2] == 1:
- image_numpy = image_numpy[:, :, 0]
- return image_numpy.astype(imtype)
-
-
-# Converts a one-hot tensor into a colorful label map
-def tensor2label(label_tensor, n_label, imtype=np.uint8, tile=False):
- if label_tensor.dim() == 4:
- # transform each image in the batch
- images_np = []
- for b in range(label_tensor.size(0)):
- one_image = label_tensor[b]
- one_image_np = tensor2label(one_image, n_label, imtype)
- images_np.append(one_image_np.reshape(1, *one_image_np.shape))
- images_np = np.concatenate(images_np, axis=0)
- # if tile:
- # images_tiled = tile_images(images_np)
- # return images_tiled
- # else:
- # images_np = images_np[0]
- # return images_np
- return images_np
-
- if label_tensor.dim() == 1:
- return np.zeros((64, 64, 3), dtype=np.uint8)
- if n_label == 0:
- return tensor2im(label_tensor, imtype)
- label_tensor = label_tensor.cpu().float()
- if label_tensor.size()[0] > 1:
- label_tensor = label_tensor.max(0, keepdim=True)[1]
- label_tensor = Colorize(n_label)(label_tensor)
- label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
- result = label_numpy.astype(imtype)
- return result
-
-
-def save_image(image_numpy, image_path, create_dir=False):
- if create_dir:
- os.makedirs(os.path.dirname(image_path), exist_ok=True)
- if len(image_numpy.shape) == 2:
- image_numpy = np.expand_dims(image_numpy, axis=2)
- if image_numpy.shape[2] == 1:
- image_numpy = np.repeat(image_numpy, 3, 2)
- image_pil = Image.fromarray(image_numpy)
-
- # save to png
- image_pil.save(image_path.replace(".jpg", ".png"))
-
-
-def mkdirs(paths):
- if isinstance(paths, list) and not isinstance(paths, str):
- for path in paths:
- mkdir(path)
- else:
- mkdir(paths)
-
-
-def mkdir(path):
- if not os.path.exists(path):
- os.makedirs(path)
-
-
-def atoi(text):
- return int(text) if text.isdigit() else text
-
-
-def natural_keys(text):
- """
- alist.sort(key=natural_keys) sorts in human order
- http://nedbatchelder.com/blog/200712/human_sorting.html
- (See Toothy's implementation in the comments)
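-
-    Example (illustrative): sorted(['img10', 'img2'], key=natural_keys)
-    returns ['img2', 'img10'] rather than the lexicographic ['img10', 'img2'].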
- """
-    return [atoi(c) for c in re.split(r"(\d+)", text)]
-
-
-def natural_sort(items):
- items.sort(key=natural_keys)
-
-
-def str2bool(v):
- if v.lower() in ("yes", "true", "t", "y", "1"):
- return True
- elif v.lower() in ("no", "false", "f", "n", "0"):
- return False
- else:
- raise argparse.ArgumentTypeError("Boolean value expected.")
-
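-# Illustrative: parser.add_argument('--flag', type=str2bool, default=False)
-# lets a CLI accept yes/no, true/false, t/f, y/n, or 1/0 for boolean options.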
-
-def find_class_in_module(target_cls_name, module):
- target_cls_name = target_cls_name.replace("_", "").lower()
- clslib = importlib.import_module(module)
- cls = None
- for name, clsobj in clslib.__dict__.items():
- if name.lower() == target_cls_name:
- cls = clsobj
-
- if cls is None:
- print(
- "In %s, there should be a class whose name matches %s in lowercase without underscore(_)"
- % (module, target_cls_name)
- )
- exit(0)
-
- return cls
-
-
-def save_network(net, label, epoch, opt):
- save_filename = "%s_net_%s.pth" % (epoch, label)
- save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)
- torch.save(net.cpu().state_dict(), save_path)
- if len(opt.gpu_ids) and torch.cuda.is_available():
- net.cuda()
-
-
-def load_network(net, label, epoch, opt):
- save_filename = "%s_net_%s.pth" % (epoch, label)
- save_dir = os.path.join(opt.checkpoints_dir, opt.name)
- save_path = os.path.join(save_dir, save_filename)
- if os.path.exists(save_path):
- weights = torch.load(save_path)
- net.load_state_dict(weights)
- return net
-
-
-###############################################################################
-# Code from
-# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
-# Modified so it complies with the Cityscapes label map colors
-###############################################################################
-def uint82bin(n, count=8):
- """returns the binary of integer n, count refers to amount of bits"""
- return "".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
-
-
-class Colorize(object):
- def __init__(self, n=35):
- self.cmap = labelcolormap(n)
- self.cmap = torch.from_numpy(self.cmap[:n])
-
- def __call__(self, gray_image):
- size = gray_image.size()
- color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
-
- for label in range(0, len(self.cmap)):
- mask = (label == gray_image[0]).cpu()
- color_image[0][mask] = self.cmap[label][0]
- color_image[1][mask] = self.cmap[label][1]
- color_image[2][mask] = self.cmap[label][2]
-
- return color_image
diff --git a/spaces/Manmay/tortoise-tts/tortoise/utils/audio.py b/spaces/Manmay/tortoise-tts/tortoise/utils/audio.py
deleted file mode 100644
index 6842af57caa067c42896947cb1744aae29041d01..0000000000000000000000000000000000000000
--- a/spaces/Manmay/tortoise-tts/tortoise/utils/audio.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import os
-from glob import glob
-
-import librosa
-import torch
-import torchaudio
-import numpy as np
-from scipy.io.wavfile import read
-
-from tortoise.utils.stft import STFT
-
-
-BUILTIN_VOICES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../voices')
-
-
-def load_wav_to_torch(full_path):
- sampling_rate, data = read(full_path)
- if data.dtype == np.int32:
- norm_fix = 2 ** 31
- elif data.dtype == np.int16:
- norm_fix = 2 ** 15
- elif data.dtype == np.float16 or data.dtype == np.float32:
- norm_fix = 1.
- else:
-        raise NotImplementedError(f"Provided data dtype not supported: {data.dtype}")
- return (torch.FloatTensor(data.astype(np.float32)) / norm_fix, sampling_rate)
-
-
-def load_audio(audiopath, sampling_rate):
- if audiopath[-4:] == '.wav':
- audio, lsr = load_wav_to_torch(audiopath)
- elif audiopath[-4:] == '.mp3':
- audio, lsr = librosa.load(audiopath, sr=sampling_rate)
- audio = torch.FloatTensor(audio)
- else:
- assert False, f"Unsupported audio format provided: {audiopath[-4:]}"
-
- # Remove any channel data.
- if len(audio.shape) > 1:
- if audio.shape[0] < 5:
- audio = audio[0]
- else:
- assert audio.shape[1] < 5
- audio = audio[:, 0]
-
- if lsr != sampling_rate:
- audio = torchaudio.functional.resample(audio, lsr, sampling_rate)
-
- # Check some assumptions about audio range. This should be automatically fixed in load_wav_to_torch, but might not be in some edge cases, where we should squawk.
- # '2' is arbitrarily chosen since it seems like audio will often "overdrive" the [-1,1] bounds.
- if torch.any(audio > 2) or not torch.any(audio < 0):
- print(f"Error with {audiopath}. Max={audio.max()} min={audio.min()}")
- audio.clip_(-1, 1)
-
- return audio.unsqueeze(0)
-
-
-TACOTRON_MEL_MAX = 2.3143386840820312
-TACOTRON_MEL_MIN = -11.512925148010254
-
-
-def denormalize_tacotron_mel(norm_mel):
- return ((norm_mel+1)/2)*(TACOTRON_MEL_MAX-TACOTRON_MEL_MIN)+TACOTRON_MEL_MIN
-
-
-def normalize_tacotron_mel(mel):
- return 2 * ((mel - TACOTRON_MEL_MIN) / (TACOTRON_MEL_MAX - TACOTRON_MEL_MIN)) - 1
-
-
-def dynamic_range_compression(x, C=1, clip_val=1e-5):
- """
- PARAMS
- ------
- C: compression factor
- """
- return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression(x, C=1):
- """
- PARAMS
- ------
- C: compression factor used to compress
- """
- return torch.exp(x) / C
-
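-# Note: for any C, the two functions above are inverses up to the clamp, i.e.
-# dynamic_range_decompression(dynamic_range_compression(x, C), C)
-# equals torch.clamp(x, min=clip_val).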
-
-def get_voices(extra_voice_dirs=[]):
- dirs = [BUILTIN_VOICES_DIR] + extra_voice_dirs
- voices = {}
- for d in dirs:
- subs = os.listdir(d)
- for sub in subs:
- subj = os.path.join(d, sub)
- if os.path.isdir(subj):
- voices[sub] = list(glob(f'{subj}/*.wav')) + list(glob(f'{subj}/*.mp3')) + list(glob(f'{subj}/*.pth'))
- return voices
-
-
-def load_voice(voice, extra_voice_dirs=[]):
- if voice == 'random':
- return None, None
-
- voices = get_voices(extra_voice_dirs)
- paths = voices[voice]
- if len(paths) == 1 and paths[0].endswith('.pth'):
- return None, torch.load(paths[0])
- else:
- conds = []
- for cond_path in paths:
- c = load_audio(cond_path, 22050)
- conds.append(c)
- return conds, None
-
-
-def load_voices(voices, extra_voice_dirs=[]):
- latents = []
- clips = []
- for voice in voices:
- if voice == 'random':
- if len(voices) > 1:
- print("Cannot combine a random voice with a non-random voice. Just using a random voice.")
- return None, None
- clip, latent = load_voice(voice, extra_voice_dirs)
- if latent is None:
- assert len(latents) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
- clips.extend(clip)
- elif clip is None:
- assert len(clips) == 0, "Can only combine raw audio voices or latent voices, not both. Do it yourself if you want this."
- latents.append(latent)
- if len(latents) == 0:
- return clips, None
- else:
- latents_0 = torch.stack([l[0] for l in latents], dim=0).mean(dim=0)
- latents_1 = torch.stack([l[1] for l in latents], dim=0).mean(dim=0)
- latents = (latents_0,latents_1)
- return None, latents
-
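-# Illustrative note: load_voices(['voice_a', 'voice_b']) (placeholder names)
-# concatenates the raw conditioning clips across voices, or averages their
-# latents; mixing the two kinds is rejected by the assertions above.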
-
-class TacotronSTFT(torch.nn.Module):
- def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
- n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
- mel_fmax=8000.0):
- super(TacotronSTFT, self).__init__()
- self.n_mel_channels = n_mel_channels
- self.sampling_rate = sampling_rate
- self.stft_fn = STFT(filter_length, hop_length, win_length)
- from librosa.filters import mel as librosa_mel_fn
- mel_basis = librosa_mel_fn(
- sr=sampling_rate, n_fft=filter_length, n_mels=n_mel_channels, fmin=mel_fmin, fmax=mel_fmax)
- mel_basis = torch.from_numpy(mel_basis).float()
- self.register_buffer('mel_basis', mel_basis)
-
- def spectral_normalize(self, magnitudes):
- output = dynamic_range_compression(magnitudes)
- return output
-
- def spectral_de_normalize(self, magnitudes):
- output = dynamic_range_decompression(magnitudes)
- return output
-
- def mel_spectrogram(self, y):
- """Computes mel-spectrograms from a batch of waves
- PARAMS
- ------
- y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
-
- RETURNS
- -------
- mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
- """
- assert(torch.min(y.data) >= -10)
- assert(torch.max(y.data) <= 10)
- y = torch.clip(y, min=-1, max=1)
-
- magnitudes, phases = self.stft_fn.transform(y)
- magnitudes = magnitudes.data
- mel_output = torch.matmul(self.mel_basis, magnitudes)
- mel_output = self.spectral_normalize(mel_output)
- return mel_output
-
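-# Illustrative usage sketch (shapes assumed from the defaults above):
-#   stft = TacotronSTFT()  # 22050 Hz, 80 mel channels
-#   mel = stft.mel_spectrogram(torch.randn(1, 22050).clamp(-1, 1))
-#   mel.shape  # -> (1, 80, T), roughly one frame per 256-sample hop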
-
-def wav_to_univnet_mel(wav, do_normalization=False, device='cuda' if not torch.backends.mps.is_available() else 'mps'):
- stft = TacotronSTFT(1024, 256, 1024, 100, 24000, 0, 12000)
- stft = stft.to(device)
- mel = stft.mel_spectrogram(wav)
- if do_normalization:
- mel = normalize_tacotron_mel(mel)
- return mel
diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/processors.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/processors.py
deleted file mode 100644
index 3412698ad5f99b89b07f818c90de866f0fe1e795..0000000000000000000000000000000000000000
--- a/spaces/Marshalls/testmtd/feature_extraction/madmom/processors.py
+++ /dev/null
@@ -1,1023 +0,0 @@
-# encoding: utf-8
-# pylint: disable=no-member
-# pylint: disable=invalid-name
-# pylint: disable=too-many-arguments
-"""
-This module contains all processor related functionality.
-
-Notes
------
-All features should be implemented as classes which inherit from Processor
-(or provide a XYZProcessor(Processor) variant). This way, multiple Processor
-objects can be chained/combined to achieve the wanted functionality.
-
-"""
-
-from __future__ import absolute_import, division, print_function
-
-import argparse
-import itertools as it
-import multiprocessing as mp
-import os
-import sys
-from collections.abc import MutableSequence
-
-import madmom
-import numpy as np
-
-from .utils import integer_types
-
-
-class Processor(object):
- """
- Abstract base class for processing data.
-
- """
-
- @classmethod
- def load(cls, infile):
- """
- Instantiate a new Processor from a file.
-
- This method un-pickles a saved Processor object. Subclasses should
- overwrite this method with a better performing solution if speed is an
- issue.
-
- Parameters
- ----------
- infile : str or file handle
- Pickled processor.
-
- Returns
- -------
- :class:`Processor` instance
- Processor.
-
- """
- import pickle
- from .io import open_file
- # instantiate a new Processor and return it
- with open_file(infile, 'rb') as f:
- # Python 2 and 3 behave differently
- try:
- # Python 3
- obj = pickle.load(f, encoding='latin1')
- except TypeError:
- # Python 2 doesn't have/need the encoding
- obj = pickle.load(f)
- return obj
-
- def dump(self, outfile):
- """
- Save the Processor to a file.
-
- This method pickles a Processor object and saves it. Subclasses should
- overwrite this method with a better performing solution if speed is an
- issue.
-
- Parameters
- ----------
- outfile : str or file handle
- Output file for pickling the processor.
-
- """
- import pickle
- from .io import open_file
- # dump the Processor to the given file
- # Note: for Python 2 / 3 compatibility reason use protocol 2
- with open_file(outfile, 'wb') as f:
- pickle.dump(self, f, protocol=2)
-
- def process(self, data, **kwargs):
- """
- Process the data.
-
- This method must be implemented by the derived class and should
- process the given data and return the processed output.
-
- Parameters
- ----------
- data : depends on the implementation of subclass
- Data to be processed.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the implementation of subclass
- Processed data.
-
- """
- raise NotImplementedError('Must be implemented by subclass.')
-
- def __call__(self, *args, **kwargs):
- # this magic method makes a Processor callable
- return self.process(*args, **kwargs)
-
-
-class OnlineProcessor(Processor):
- """
- Abstract base class for processing data in online mode.
-
- Derived classes must implement the following methods:
-
- - process_online(): process the data in online mode,
- - process_offline(): process the data in offline mode.
-
- """
-
- def __init__(self, online=False):
- self.online = online
-
- def process(self, data, **kwargs):
- """
- Process the data either in online or offline mode.
-
- Parameters
- ----------
- data : depends on the implementation of subclass
- Data to be processed.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the implementation of subclass
- Processed data.
-
- Notes
- -----
- This method is used to pass the data to either `process_online` or
- `process_offline`, depending on the `online` setting of the processor.
-
- """
- if self.online:
- return self.process_online(data, **kwargs)
- return self.process_offline(data, **kwargs)
-
- def process_online(self, data, reset=True, **kwargs):
- """
- Process the data in online mode.
-
- This method must be implemented by the derived class and should process
- the given data frame by frame and return the processed output.
-
- Parameters
- ----------
- data : depends on the implementation of subclass
- Data to be processed.
- reset : bool, optional
- Reset the processor to its initial state before processing.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the implementation of subclass
- Processed data.
-
- """
- raise NotImplementedError('Must be implemented by subclass.')
-
- def process_offline(self, data, **kwargs):
- """
- Process the data in offline mode.
-
- This method must be implemented by the derived class and should process
- the given data and return the processed output.
-
- Parameters
- ----------
- data : depends on the implementation of subclass
- Data to be processed.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the implementation of subclass
- Processed data.
-
- """
- raise NotImplementedError('Must be implemented by subclass.')
-
- def reset(self):
- """
- Reset the OnlineProcessor.
-
- This method must be implemented by the derived class and should reset
- the processor to its initial state.
-
- """
- raise NotImplementedError('Must be implemented by subclass.')
-
-
-class OutputProcessor(Processor):
- """
- Class for processing data and/or feeding it into some sort of output.
-
- """
-
- def process(self, data, output, **kwargs):
- """
- Processes the data and feed it to the output.
-
- This method must be implemented by the derived class and should
- process the given data and return the processed output.
-
- Parameters
- ----------
- data : depends on the implementation of subclass
- Data to be processed (e.g. written to file).
- output : str or file handle
- Output file name or file handle.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the implementation of subclass
- Processed data.
-
- """
- # pylint: disable=arguments-differ
- raise NotImplementedError('Must be implemented by subclass.')
-
-
-# functions for processing file(s) with a Processor
-def _process(process_tuple):
- """
-    Apply a Processor (or plain callable) to the given data.
-
- The processed data is returned and if applicable also piped to the given
- output.
-
- Parameters
- ----------
- process_tuple : tuple (Processor/function, data[, output], kwargs)
-
- The tuple must contain a Processor object as the first item and the
- data to be processed as the second tuple item. If a third tuple item
- is given, it is used as an output argument. The last item is passed
- as keyword arguments to the processor's process() method.
- Instead of a Processor also a function accepting a single positional
- argument (data) or two positional arguments (data, output) can be
- given. It must behave exactly as a :class:`Processor`, i.e. return
- the processed data and optionally pipe it to the output. Keyword
- arguments are not passed to the function.
-
- Returns
- -------
- depends on the processor
- Processed data.
-
- Notes
- -----
- This must be a top-level function to be pickle-able.
-
- """
- # do not process the data, if the first item (i.e. Processor) is None
- if process_tuple[0] is None:
- return process_tuple[1]
- # call the Processor with data and kwargs
- elif isinstance(process_tuple[0], Processor):
- return process_tuple[0](*process_tuple[1:-1], **process_tuple[-1])
- # just call whatever we got here (e.g. a function) without kwargs
- return process_tuple[0](*process_tuple[1:-1])
-
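-# Illustrative: _process((np.mean, np.arange(4), {})) returns 1.5; the trailing
-# dict is ignored for plain functions and passed as **kwargs for Processors.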
-
-class SequentialProcessor(MutableSequence, Processor):
- """
- Processor class for sequential processing of data.
-
- Parameters
- ----------
- processors : list
- Processor instances to be processed sequentially.
-
- Notes
- -----
- If the `processors` list contains lists or tuples, these get wrapped as a
- SequentialProcessor itself.
-
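-    Examples
-    --------
-    A minimal sketch (plain functions are accepted in place of Processor
-    instances):
-
-    >>> proc = SequentialProcessor([lambda x: x + 1, lambda x: x * 2])
-    >>> proc(3)
-    8
-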
- """
-
- def __init__(self, processors):
- self.processors = []
- # iterate over all given processors and save them
- for processor in processors:
- # wrap lists and tuples as a SequentialProcessor
- if isinstance(processor, (list, tuple)):
- processor = SequentialProcessor(processor)
- # save the processors
- self.processors.append(processor)
-
- def __getitem__(self, index):
- """
- Get the Processor at the given processing chain position.
-
- Parameters
- ----------
- index : int
- Position inside the processing chain.
-
- Returns
- -------
- :class:`Processor`
- Processor at the given position.
-
- """
- return self.processors[index]
-
- def __setitem__(self, index, processor):
- """
- Set the Processor at the given processing chain position.
-
- Parameters
- ----------
- index : int
- Position inside the processing chain.
- processor : :class:`Processor`
- Processor to set.
-
- """
- self.processors[index] = processor
-
- def __delitem__(self, index):
- """
- Delete the Processor at the given processing chain position.
-
- Parameters
- ----------
- index : int
- Position inside the processing chain.
-
- """
- del self.processors[index]
-
- def __len__(self):
- """Length of the processing chain."""
- return len(self.processors)
-
- def insert(self, index, processor):
- """
- Insert a Processor at the given processing chain position.
-
- Parameters
- ----------
- index : int
- Position inside the processing chain.
- processor : :class:`Processor`
- Processor to insert.
-
- """
- self.processors.insert(index, processor)
-
- def append(self, other):
- """
- Append another Processor to the processing chain.
-
- Parameters
- ----------
- other : :class:`Processor`
- Processor to append to the processing chain.
-
- """
- self.processors.append(other)
-
- def extend(self, other):
- """
- Extend the processing chain with a list of Processors.
-
- Parameters
- ----------
- other : list
- Processors to be appended to the processing chain.
-
- """
- self.processors.extend(other)
-
- def process(self, data, **kwargs):
- """
- Process the data sequentially with the defined processing chain.
-
- Parameters
- ----------
- data : depends on the first processor of the processing chain
- Data to be processed.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the last processor of the processing chain
- Processed data.
-
- """
- # sequentially process the data
- for processor in self.processors:
- data = _process((processor, data, kwargs))
- return data
-
-
-# inherit from SequentialProcessor because of append() and extend()
-class ParallelProcessor(SequentialProcessor):
- """
- Processor class for parallel processing of data.
-
- Parameters
- ----------
- processors : list
- Processor instances to be processed in parallel.
- num_threads : int, optional
- Number of parallel working threads.
-
- Notes
- -----
- If the `processors` list contains lists or tuples, these get wrapped as a
- :class:`SequentialProcessor`.
-
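-    Examples
-    --------
-    A minimal sketch (each branch receives the same input; with the default
-    single thread the built-in `map` is used):
-
-    >>> par = ParallelProcessor([lambda x: x + 1, lambda x: x * 2])
-    >>> par(3)
-    [4, 6]
-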
- """
- # pylint: disable=too-many-ancestors
-
- def __init__(self, processors, num_threads=None):
- # set the processing chain
- super(ParallelProcessor, self).__init__(processors)
- # number of threads
- if num_threads is None:
- num_threads = 1
- # Note: we must define the map function here, otherwise it leaks both
- # memory and file descriptors if we init the pool in the process
- # method. This also means that we must use only 1 thread if we
- # want to pickle the Processor, because map is pickle-able,
- # whereas mp.Pool().map is not.
- self.map = map
- if min(len(processors), max(1, num_threads)) > 1:
- self.map = mp.Pool(num_threads).map
-
- def process(self, data, **kwargs):
- """
- Process the data in parallel.
-
- Parameters
- ----------
- data : depends on the processors
- Data to be processed.
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- list
- Processed data.
-
- """
- # if only a single processor is given, there's no need to map()
- if len(self.processors) == 1:
- return [_process((self.processors[0], data, kwargs))]
- # process data in parallel and return a list with processed data
- return list(self.map(_process, zip(self.processors, it.repeat(data),
- it.repeat(kwargs))))
-
-
-class IOProcessor(OutputProcessor):
- """
- Input/Output Processor which processes the input data with the input
- processor and pipes everything into the given output processor.
-
- All Processors defined in the input chain are sequentially called with the
- 'data' argument only. The output Processor is the only one ever called with
- two arguments ('data', 'output').
-
- Parameters
- ----------
- in_processor : :class:`Processor`, function, tuple or list
- Input processor. Can be a :class:`Processor` (or subclass thereof
- like :class:`SequentialProcessor` or :class:`ParallelProcessor`), a
- function accepting a single argument ('data'). If a tuple or list
- is given, it is wrapped as a :class:`SequentialProcessor`.
- out_processor : :class:`OutputProcessor`, function, tuple or list
- OutputProcessor or function accepting two arguments ('data', 'output').
- If a tuple or list is given, it is wrapped in an :class:`IOProcessor`
- itself with the last element regarded as the `out_processor` and all
- others as `in_processor`.
-
- """
-
- def __init__(self, in_processor, out_processor=None):
- # TODO: check the input and output processors!?
- # as input a Processor, SequentialProcessor, ParallelProcessor
- # or a function with only one argument should be accepted
- # as output a OutputProcessor, IOProcessor or function with two
- # arguments should be accepted
- # wrap the input processor in a SequentialProcessor if needed
- if isinstance(in_processor, (list, tuple)):
- self.in_processor = SequentialProcessor(in_processor)
- else:
- self.in_processor = in_processor
- # wrap the output processor in an IOProcessor if needed
- if isinstance(out_processor, (list, tuple)):
- if len(out_processor) >= 2:
- # use the last processor as output and all others as input
- self.out_processor = IOProcessor(out_processor[:-1],
- out_processor[-1])
-            elif len(out_processor) == 1:
- self.out_processor = out_processor[0]
- else:
- self.out_processor = out_processor
-
- def __getitem__(self, index):
- """
- Get the Processor at the given position.
-
- Parameters
- ----------
- index : int
- Processor position. Index '0' refers to the `in_processor`,
- index '1' to the `out_processor`.
-
- Returns
- -------
- :class:`Processor`
- Processor at the given position.
-
- """
- if index == 0:
- return self.in_processor
- elif index == 1:
- return self.out_processor
- else:
- raise IndexError('Only `in_processor` at index 0 and '
- '`out_processor` at index 1 are defined.')
-
- def process(self, data, output=None, **kwargs):
- """
- Processes the data with the input processor and pipe everything into
- the output processor, which also pipes it to `output`.
-
- Parameters
- ----------
- data : depends on the input processors
- Data to be processed.
- output: str or file handle
- Output file (handle).
- kwargs : dict, optional
- Keyword arguments for processing.
-
- Returns
- -------
- depends on the output processors
- Processed data.
-
- """
- # process the data by the input processor
- data = _process((self.in_processor, data, kwargs))
- # process the data by the output processor and return it
- return _process((self.out_processor, data, output, kwargs))
-
-
-# functions and classes to process files with a Processor
-def process_single(processor, infile, outfile, **kwargs):
- """
- Process a single file with the given Processor.
-
- Parameters
- ----------
- processor : :class:`Processor` instance
- Processor to be processed.
- infile : str or file handle
- Input file (handle).
- outfile : str or file handle
- Output file (handle).
-
- """
- # pylint: disable=unused-argument
- # adjust origin in online mode
- if kwargs.get('online'):
- kwargs['origin'] = 'online'
- kwargs['reset'] = False
- # process the input file
- _process((processor, infile, outfile, kwargs))
-
-
-class _ParallelProcess(mp.Process):
- """
- Class for processing tasks in a queue.
-
- Parameters
- ----------
- task_queue :
- Queue with tasks, i.e. tuples ('processor', 'infile', 'outfile')
-
- Notes
- -----
- Usually, multiple instances are created via :func:`process_batch`.
-
- """
- def __init__(self, task_queue):
- super(_ParallelProcess, self).__init__()
- self.task_queue = task_queue
-
- def run(self):
- """Process all tasks from the task queue."""
- from .io.audio import LoadAudioFileError
- while True:
- # get the task tuple
- processor, infile, outfile, kwargs = self.task_queue.get()
- try:
- # process the Processor with the data
- _process((processor, infile, outfile, kwargs))
- except LoadAudioFileError as e:
- print(e)
- finally:
- # signal that it is done
- self.task_queue.task_done()
-
-
-# function to batch process multiple files with a processor
-def process_batch(processor, files, output_dir=None, output_suffix=None,
- strip_ext=True, num_workers=mp.cpu_count(), shuffle=False,
- **kwargs):
- """
- Process a list of files with the given Processor in batch mode.
-
- Parameters
- ----------
- processor : :class:`Processor` instance
- Processor to be processed.
- files : list
- Input file(s) (handles).
- output_dir : str, optional
- Output directory.
- output_suffix : str, optional
- Output suffix (e.g. '.txt' including the dot).
- strip_ext : bool, optional
- Strip off the extension from the input files.
- num_workers : int, optional
- Number of parallel working threads.
- shuffle : bool, optional
- Shuffle the `files` before distributing them to the working threads
-
- Notes
- -----
- Either `output_dir` and/or `output_suffix` must be set. If `strip_ext` is
- True, the extension of the input file names is stripped off before the
- `output_suffix` is appended to the input file names.
-
-    Use `shuffle` if you experience out-of-memory errors (these can occur for
-    certain methods with high memory consumption if consecutive files are
-    rather long).
-
- """
- # pylint: disable=unused-argument
- # either output_dir or output_suffix must be given
- if output_dir is None and output_suffix is None:
- raise ValueError('either output directory or suffix must be given')
- # make sure the directory exists
- if output_dir is not None:
- try:
- # create output directory
- os.mkdir(output_dir)
- except OSError:
- # directory exists already
- pass
-
- # create task queue
- tasks = mp.JoinableQueue()
- # create working threads
- processes = [_ParallelProcess(tasks) for _ in range(num_workers)]
- for p in processes:
- p.daemon = True
- p.start()
-
- # shuffle files?
- if shuffle:
- from random import shuffle
- shuffle(files)
-
- # process all the files
- for input_file in files:
- # set the output file name
- if output_dir is not None:
- output_file = "%s/%s" % (output_dir, os.path.basename(input_file))
- else:
- output_file = input_file
- # strip off the extension
- if strip_ext:
- output_file = os.path.splitext(output_file)[0]
- # append the suffix if needed
- if output_suffix is not None:
- output_file += output_suffix
- # put processing tasks in the queue
- tasks.put((processor, input_file, output_file, kwargs))
- # wait for all processing tasks to finish
- tasks.join()
-
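-# Illustrative sketch (the processor and file list are placeholders):
-#   from glob import glob
-#   process_batch(beat_processor, sorted(glob('audio/*.wav')),
-#                 output_suffix='.beats.txt', num_workers=4)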
-
-# processor for buffering data
-class BufferProcessor(Processor):
- """
- Buffer for processors which need context to do their processing.
-
- Parameters
- ----------
- buffer_size : int or tuple
- Size of the buffer (time steps, [additional dimensions]).
- init : numpy array, optional
- Init the buffer with this array.
- init_value : float, optional
- If only `buffer_size` is given but no `init`, use this value to
- initialise the buffer.
-
- Notes
- -----
- If `buffer_size` (or the first item thereof in case of tuple) is 1,
- only the un-buffered current value is returned.
-
- If context is needed, `buffer_size` must be set to >1.
- E.g. SpectrogramDifference needs a context of two frames to be able to
- compute the difference between two consecutive frames.
-
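-    Examples
-    --------
-    A rolling buffer of length 4, initialised with zeros (illustrative):
-
-    >>> buf = BufferProcessor(buffer_size=4, init_value=0)
-    >>> buf(np.array([1, 2]))
-    array([0., 0., 1., 2.])
-    >>> buf(np.array([3]))
-    array([0., 1., 2., 3.])
-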
- """
-
- def __init__(self, buffer_size=None, init=None, init_value=0):
- # if init is given, infer buffer_size from it
- if buffer_size is None and init is not None:
- buffer_size = init.shape
- # if buffer_size is int, make a tuple
- elif isinstance(buffer_size, integer_types):
- buffer_size = (buffer_size, )
- # TODO: use np.pad for fancy initialisation (can be done in process())
- # init buffer if needed
- if buffer_size is not None and init is None:
- init = np.ones(buffer_size) * init_value
- # save variables
- self.buffer_size = buffer_size
- self.init = init
- self.data = init
-
- @property
- def buffer_length(self):
- """Length of the buffer (time steps)."""
- return self.buffer_size[0]
-
- def reset(self, init=None):
- """
- Reset BufferProcessor to its initial state.
-
- Parameters
- ----------
- init : numpy array, shape (num_hiddens,), optional
- Reset BufferProcessor to this initial state.
-
- """
- self.data = init if init is not None else self.init
-
- def process(self, data, **kwargs):
- """
- Buffer the data.
-
- Parameters
- ----------
- data : numpy array or subclass thereof
- Data to be buffered.
-
- Returns
- -------
- numpy array or subclass thereof
- Data with buffered context.
-
- Notes
- -----
- If the length of data is the same as the buffer's length, the data of
- the buffer is completely overwritten by new data. If it exceeds the
- length, only the latest 'buffer_length' items of data are used.
-
- """
- # expected minimum number of dimensions
- ndmin = len(self.buffer_size)
- # cast the data to have that many dimensions
- if data.ndim < ndmin:
- data = np.array(data, copy=False, subok=True, ndmin=ndmin)
- # length of the data
- data_length = len(data)
- # if length of data exceeds buffer length simply replace buffer data
- if data_length >= self.buffer_length:
- self.data = data[-self.buffer_length:]
- else:
- # roll buffer by `data_length`, i.e. move data to the 'left'
- self.data = np.roll(self.data, -data_length, axis=0)
- # overwrite 'right' part with new data
- self.data[-data_length:] = data
- # return the complete buffer
- return self.data
-
- # alias for easier / more intuitive calling
- buffer = process
-
- def __getitem__(self, index):
- """
- Direct access to the buffer data.
-
- Parameters
- ----------
- index : int, slice, ndarray,
- Any NumPy indexing method to access the buffer data directly.
-
- Returns
- -------
- numpy array or subclass thereof
- Requested view of the buffered data.
-
- """
- return self.data[index]
-
-
-# function to process live input
-def process_online(processor, infile, outfile, **kwargs):
- """
- Process a file or audio stream with the given Processor.
-
- Parameters
- ----------
- processor : :class:`Processor` instance
- Processor to be processed.
- infile : str or file handle, optional
- Input file (handle). If none is given, the stream present at the
- system's audio input is used. Additional keyword arguments can be used
- to influence the frame size and hop size.
- outfile : str or file handle
- Output file (handle).
- kwargs : dict, optional
- Keyword arguments passed to :class:`.audio.signal.Stream` if
- `in_stream` is 'None'.
-
- Notes
- -----
- Right now there is no way to determine if a processor is online-capable or
- not. Thus, calling any processor with this function may not produce the
- results expected.
-
- """
- from madmom.audio.signal import Stream, FramedSignal
- # set default values
- kwargs['sample_rate'] = kwargs.get('sample_rate', 44100)
- kwargs['num_channels'] = kwargs.get('num_channels', 1)
- # list all available PyAudio devices and exit afterwards
-    if kwargs.get('list_stream_input_device'):
- import pyaudio
- pa = pyaudio.PyAudio()
- for i in range(pa.get_device_count()):
- info = pa.get_device_info_by_index(i)
- print('%d: %s' % (info['index'], info['name']))
- exit(0)
-
- # if no input file is given, create a Stream with the given arguments
- if infile is None:
- # open a stream and start if not running already
- stream = Stream(**kwargs)
- if not stream.is_running():
- stream.start()
- # use the input file
- else:
- # set parameters for opening the file
- from .audio.signal import FRAME_SIZE, HOP_SIZE, FPS, NUM_CHANNELS
- frame_size = kwargs.get('frame_size', FRAME_SIZE)
- hop_size = kwargs.get('hop_size', HOP_SIZE)
- fps = kwargs.get('fps', FPS)
- num_channels = kwargs.get('num_channels', NUM_CHANNELS)
- # FIXME: overwrite the frame size with the maximum value of all used
- # processors. This is needed if multiple frame sizes are used
- import warnings
- warnings.warn('make sure that the `frame_size` (%d) is equal to the '
- 'maximum value used by any `FramedSignalProcessor`.' %
- frame_size)
- # Note: origin must be 'online' and num_frames 'None' to behave exactly
- # the same as with live input
- stream = FramedSignal(infile, frame_size=frame_size, hop_size=hop_size,
- fps=fps, origin='online', num_frames=None,
- num_channels=num_channels)
- # set arguments for online processing
- # Note: pass only certain arguments, because these will be passed to the
- # processors at every time step (kwargs contains file handles etc.)
- process_args = {'reset': False} # do not reset stateful processors
- # process everything frame-by-frame
- for frame in stream:
- _process((processor, frame, outfile, process_args))
-
-
-# function for pickling a processor
-def pickle_processor(processor, outfile, **kwargs):
- """
- Pickle the Processor to a file.
-
- Parameters
- ----------
- processor : :class:`Processor` instance
- Processor to be pickled.
- outfile : str or file handle
- Output file (handle) where to pickle it.
-
- """
- # pylint: disable=unused-argument
- processor.dump(outfile)
-
-
-# generic input/output arguments for scripts
-def io_arguments(parser, output_suffix='.txt', pickle=True, online=False):
- """
- Add input / output related arguments to an existing parser.
-
- Parameters
- ----------
- parser : argparse parser instance
- Existing argparse parser object.
- output_suffix : str, optional
- Suffix appended to the output files.
- pickle : bool, optional
- Add a 'pickle' sub-parser to the parser.
- online : bool, optional
- Add an 'online' sub-parser to the parser.
-
- """
- # default output
- try:
- output = sys.stdout.buffer
- except AttributeError:
- output = sys.stdout
- # add general options
- parser.add_argument('-v', dest='verbose', action='count',
- help='increase verbosity level')
- # add subparsers
- sub_parsers = parser.add_subparsers(title='processing options')
-
- # pickle processor options
- if pickle:
- sp = sub_parsers.add_parser('pickle', help='pickle processor')
- sp.set_defaults(func=pickle_processor)
- # Note: requiring '-o' is a simple safety measure to not overwrite
- # existing audio files after using the processor in 'batch' mode
- sp.add_argument('-o', dest='outfile', type=argparse.FileType('wb'),
- default=output, help='output file [default: STDOUT]')
-
- # single file processing options
- sp = sub_parsers.add_parser('single', help='single file processing')
- sp.set_defaults(func=process_single)
- sp.add_argument('infile', type=argparse.FileType('rb'),
- help='input audio file')
- # Note: requiring '-o' is a simple safety measure to not overwrite existing
- # audio files after using the processor in 'batch' mode
- sp.add_argument('-o', dest='outfile', type=argparse.FileType('wb'),
- default=output, help='output file [default: STDOUT]')
- sp.add_argument('-j', dest='num_threads', type=int, default=mp.cpu_count(),
- help='number of threads [default=%(default)s]')
- # add arguments needed for loading processors
- if online:
- sp.add_argument('--online', action='store_true', default=None,
- help='use online settings [default: offline]')
-
- # batch file processing options
- sp = sub_parsers.add_parser('batch', help='batch file processing')
- sp.set_defaults(func=process_batch)
- sp.add_argument('files', nargs='+', help='files to be processed')
- sp.add_argument('-o', dest='output_dir', default=None,
- help='output directory [default=%(default)s]')
- sp.add_argument('-s', dest='output_suffix', default=output_suffix,
- help='suffix appended to the files (dot must be included '
- 'if wanted) [default=%(default)s]')
- sp.add_argument('--ext', dest='strip_ext', action='store_false',
- help='keep the extension of the input file [default='
- 'strip it off before appending the output suffix]')
- sp.add_argument('-j', dest='num_workers', type=int, default=mp.cpu_count(),
- help='number of workers [default=%(default)s]')
- sp.add_argument('--shuffle', action='store_true',
- help='shuffle files before distributing them to the '
- 'working threads [default=process them in sorted '
- 'order]')
- sp.set_defaults(num_threads=1)
-
- # online processing options
- if online:
- sp = sub_parsers.add_parser('online', help='online processing')
- sp.set_defaults(func=process_online)
- sp.add_argument('infile', nargs='?', type=argparse.FileType('rb'),
- default=None, help='input audio file (if no file is '
- 'given, a stream operating on the '
- 'system audio input is used)')
- sp.add_argument('-o', dest='outfile', type=argparse.FileType('wb'),
- default=output, help='output file [default: STDOUT]')
- sp.add_argument('-j', dest='num_threads', type=int, default=1,
- help='number of threads [default=%(default)s]')
- sp.add_argument('--device', dest='stream_input_device', type=int,
- default=None, help='PyAudio device index of the '
- 'desired input device '
- '[default=%(default)s]')
- sp.add_argument('--list', dest='list_stream_input_device',
- action='store_true', default=False,
- help='show a list of available PyAudio devices; index '
- 'can be used as STREAM_INPUT_DEVICE for the '
- '--device argument')
- # set arguments for loading processors
- sp.set_defaults(online=True) # use online settings/parameters
- sp.set_defaults(num_frames=1) # process everything frame-by-frame
- sp.set_defaults(origin='stream') # set origin to get whole frame
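-
-
-# A minimal sketch of wiring `io_arguments` into a script; `SomeProcessor`
-# is hypothetical, everything else follows the sub-parsers set up above:
-#
-# import argparse
-# parser = argparse.ArgumentParser(description='example processing script')
-# io_arguments(parser, output_suffix='.beats.txt', online=True)
-# args = parser.parse_args()
-# processor = SomeProcessor(**vars(args))
-# args.func(processor, **vars(args))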
diff --git a/spaces/Martlgap/LiveFaceID/app.py b/spaces/Martlgap/LiveFaceID/app.py
deleted file mode 100644
index 074c5a86408f4ca53ee7adc4117d9be27f2dc3d6..0000000000000000000000000000000000000000
--- a/spaces/Martlgap/LiveFaceID/app.py
+++ /dev/null
@@ -1,420 +0,0 @@
-import streamlit as st
-import time
-from typing import List
-from streamlit_webrtc import webrtc_streamer, WebRtcMode
-import av
-import numpy as np
-import onnxruntime as rt
-import threading
-import mediapipe as mp
-import os
-from twilio.rest import Client
-import cv2
-from skimage.transform import SimilarityTransform
-from types import SimpleNamespace
-from sklearn.metrics.pairwise import cosine_distances
-
-
-class Detection(SimpleNamespace):
- bbox: List[List[float]] = None
- landmarks: List[List[float]] = None
-
-
-class Identity(SimpleNamespace):
- detection: Detection = Detection()
- name: str = None
- embedding: np.ndarray = None
- face: np.ndarray = None
-
-
-class Match(SimpleNamespace):
- subject_id: Identity = Identity()
- gallery_id: Identity = Identity()
- distance: float = None
- name: str = None
-
-
-class Grabber(object):
- def __init__(self, video_receiver) -> None:
- self.currentFrame = None
- self.capture = video_receiver
- self.thread = threading.Thread(target=self.update_frame)
- self.thread.daemon = True
-
- def update_frame(self) -> None:
- while True:
- self.currentFrame = self.capture.get_frame()
-
- def get_frame(self) -> av.VideoFrame:
- return self.currentFrame
-
-
-# Similarity threshold for face matching
-SIMILARITY_THRESHOLD = 1.2
-
-# Get twilio ice server configuration using twilio credentials from environment variables (set in streamlit secrets)
-# Ref: https://www.twilio.com/docs/stun-turn/api
-client = Client(os.environ["TWILIO_ACCOUNT_SID"], os.environ["TWILIO_AUTH_TOKEN"])
-token = client.tokens.create()
-ICE_SERVERS = token.ice_servers
-
-# Set page layout for streamlit to wide
-st.set_page_config(layout="wide", page_title="Live Face Recognition", page_icon=":sunglasses:")
-
-# Streamlit app
-st.title("Live Webcam Face Recognition")
-
-st.markdown("**Live Stream**")
-ctx_container = st.container()
-stream_container = st.empty()
-
-st.markdown("**Matches**")
-matches_container = st.info("No matches found yet ...")
-
-st.markdown("**Info**")
-info_container = st.empty()
-
-
-# Init face detector and face recognizer
-face_recognizer = rt.InferenceSession("model.fixed.onnx", providers=rt.get_available_providers())
-face_detector = mp.solutions.face_mesh.FaceMesh(
- refine_landmarks=True,
- min_detection_confidence=0.5,
- min_tracking_confidence=0.5,
- max_num_faces=5,
-)
-
-
-def detect_faces(frame: np.ndarray) -> List[Detection]:
- # Process the frame with the face detector
- result = face_detector.process(frame)
-
- # Initialize an empty list to store the detected faces
- detections = []
-
- # Check if any faces were detected
- if result.multi_face_landmarks:
- # Iterate over each detected face
- for count, detection in enumerate(result.multi_face_landmarks):
- # Select 5 Landmarks
- five_landmarks = np.asarray(detection.landmark)[[470, 475, 1, 57, 287]]
-
- # Extract the x and y coordinates of the landmarks of interest
- landmarks = [[landmark.x * frame.shape[1], landmark.y * frame.shape[0]] for landmark in five_landmarks]
-
- # Extract the x and y coordinates of all landmarks
- all_x_coords = [landmark.x * frame.shape[1] for landmark in detection.landmark]
- all_y_coords = [landmark.y * frame.shape[0] for landmark in detection.landmark]
-
- # Compute the bounding box of the face
- x_min, x_max = int(min(all_x_coords)), int(max(all_x_coords))
- y_min, y_max = int(min(all_y_coords)), int(max(all_y_coords))
- bbox = [[x_min, y_min], [x_max, y_max]]
-
- # Create a Detection object for the face
- detection = Detection(
- idx=count,
- bbox=bbox,
- landmarks=landmarks,
- confidence=None,
- )
-
- # Add the detection to the list
- detections.append(detection)
-
- # Return the list of detections
- return detections
-
-
-def recognize_faces(frame: np.ndarray, detections: List[Detection]) -> List[Identity]:
- if not detections:
- return []
-
- identities = []
- for detection in detections:
- # ALIGNMENT -----------------------------------------------------------
- # Target landmark coordinates (as used in training)
- landmarks_target = np.array(
- [
- [38.2946, 51.6963],
- [73.5318, 51.5014],
- [56.0252, 71.7366],
- [41.5493, 92.3655],
- [70.7299, 92.2041],
- ],
- dtype=np.float32,
- )
- tform = SimilarityTransform()
- tform.estimate(detection.landmarks, landmarks_target)
- tmatrix = tform.params[0:2, :]
- face_aligned = cv2.warpAffine(frame, tmatrix, (112, 112), borderValue=0.0)
- # ---------------------------------------------------------------------
-
- # INFERENCE -----------------------------------------------------------
- # Inference face embeddings with onnxruntime
- input_image = (np.asarray([face_aligned]).astype(np.float32) / 255.0).clip(0.0, 1.0)
- embedding = face_recognizer.run(None, {"input_image": input_image})[0][0]
- # ---------------------------------------------------------------------
-
- # Create Identity object
- identities.append(Identity(detection=detection, embedding=embedding, face=face_aligned))
-
- return identities
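-
-# Note: the five target coordinates used for alignment above are the widely
-# used ArcFace/InsightFace 112x112 five-point template (eye centres, nose
-# tip, mouth corners).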
-
-
-def match_faces(subjects: List[Identity], gallery: List[Identity]) -> List[Match]:
- if len(gallery) == 0 or len(subjects) == 0:
- return []
-
- # Get Embeddings
- embs_gal = np.asarray([identity.embedding for identity in gallery])
- embs_det = np.asarray([identity.embedding for identity in subjects])
-
- # Calculate Cosine Distances
- cos_distances = cosine_distances(embs_det, embs_gal)
-
- # Find Matches
- matches = []
- for ident_idx, identity in enumerate(subjects):
- dists_to_identity = cos_distances[ident_idx]
- idx_min = np.argmin(dists_to_identity)
- if dists_to_identity[idx_min] < SIMILARITY_THRESHOLD:
- matches.append(
- Match(
- subject_id=identity,
- gallery_id=gallery[idx_min],
- distance=dists_to_identity[idx_min],
- )
- )
-
- # Sort Matches by identity_idx
- matches = sorted(matches, key=lambda match: match.gallery_id.name)
-
- return matches
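-
-# Quick numeric sanity check of the matching logic above (illustrative
-# values): cosine distance is 0.0 for identical embeddings and 1.0 for
-# orthogonal ones, and a subject is matched whenever its smallest gallery
-# distance stays below SIMILARITY_THRESHOLD.
-#
-# >>> import numpy as np
-# >>> from sklearn.metrics.pairwise import cosine_distances
-# >>> cosine_distances(np.array([[1., 0.]]), np.array([[1., 0.], [0., 1.]]))
-# array([[0., 1.]])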
-
-
-def draw_annotations(frame: np.ndarray, detections: List[Detection], matches: List[Match]) -> np.ndarray:
- global timestamp
- shape = np.asarray(frame.shape[:2][::-1])
-
- # Upscale frame to 1080p for better visualization of drawn annotations
- frame = cv2.resize(frame, (1920, 1080))
- upscale_factor = np.asarray([1920 / shape[0], 1080 / shape[1]])
- shape = np.asarray(frame.shape[:2][::-1])
-
- # Make the frame writeable so the drawing functions below can modify it
- frame.flags.writeable = True
-
- fps = 1 / (time.time() - timestamp)
- timestamp = time.time()
-
- # Draw FPS
- cv2.putText(
- frame,
- f"FPS: {fps:.1f}",
- (20, 40),
- cv2.FONT_HERSHEY_SIMPLEX,
- 1,
- (0, 255, 0),
- 2,
- )
-
- # Draw Detections
- for detection in detections:
- # Draw Landmarks
- for landmark in detection.landmarks:
- cv2.circle(
- frame,
- (landmark * upscale_factor).astype(int),
- 2,
- (255, 255, 255),
- -1,
- )
-
- # Draw Bounding Box
- cv2.rectangle(
- frame,
- (detection.bbox[0] * upscale_factor).astype(int),
- (detection.bbox[1] * upscale_factor).astype(int),
- (255, 0, 0),
- 2,
- )
-
- # Draw Index
- cv2.putText(
- frame,
- str(detection.idx),
- (
- ((detection.bbox[1][0] + 2) * upscale_factor[0]).astype(int),
- ((detection.bbox[1][1] + 2) * upscale_factor[1]).astype(int),
- ),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- (0, 0, 0),
- 2,
- )
-
- # Draw Matches
- for match in matches:
- detection = match.subject_id.detection
- name = match.gallery_id.name
-
- # Draw Bounding Box in green
- cv2.rectangle(
- frame,
- (detection.bbox[0] * upscale_factor).astype(int),
- (detection.bbox[1] * upscale_factor).astype(int),
- (0, 255, 0),
- 2,
- )
-
- # Draw Banner
- cv2.rectangle(
- frame,
- (
- (detection.bbox[0][0] * upscale_factor[0]).astype(int),
- (detection.bbox[0][1] * upscale_factor[1] - (shape[1] // 25)).astype(int),
- ),
- (
- (detection.bbox[1][0] * upscale_factor[0]).astype(int),
- (detection.bbox[0][1] * upscale_factor[1]).astype(int),
- ),
- (255, 255, 255),
- -1,
- )
-
- # Draw Name
- cv2.putText(
- frame,
- name,
- (
- ((detection.bbox[0][0] + shape[0] // 400) * upscale_factor[0]).astype(int),
- ((detection.bbox[0][1] - shape[1] // 50) * upscale_factor[1]).astype(int),
- ),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.7,
- (0, 0, 0),
- 2,
- )
-
- # Draw Distance
- cv2.putText(
- frame,
- f" Distance: {match.distance:.2f}",
- (
- ((detection.bbox[0][0] + shape[0] // 400) * upscale_factor[0]).astype(int),
- ((detection.bbox[0][1] - shape[1] // 350) * upscale_factor[1]).astype(int),
- ),
- cv2.FONT_HERSHEY_SIMPLEX,
- 0.5,
- (0, 0, 0),
- 2,
- )
-
- return frame
-
-
-def video_frame_callback(frame: av.VideoFrame):
- # Convert frame to numpy array
- frame = frame.to_ndarray(format="rgb24")
-
- # Run face detection
- detections = detect_faces(frame)
-
- # Run face recognition
- subjects = recognize_faces(frame, detections)
-
- # Run face matching
- matches = match_faces(subjects, gallery)
-
- # Draw annotations
- frame = draw_annotations(frame, detections, matches)
-
- # Convert frame back to av.VideoFrame
- frame = av.VideoFrame.from_ndarray(frame, format="rgb24")
-
- return frame, matches
-
-
-# Sidebar for face gallery
-with st.sidebar:
- st.markdown("# Face Gallery")
- files = st.sidebar.file_uploader(
- "Upload images to gallery",
- type=["png", "jpg", "jpeg"],
- accept_multiple_files=True,
- label_visibility="collapsed",
- )
-
- # Init gallery
- gallery = []
- for file in files:
- # Read file bytes
- file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
-
- # Decode image and convert from BGR to RGB
- img = cv2.cvtColor(cv2.imdecode(file_bytes, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
-
- # Detect faces
- detections = detect_faces(img)
-
- if detections:
- # recognize faces
- subjects = recognize_faces(img, detections[:1])
-
- # Add subjects to gallery
- gallery.append(
- Identity(
- name=os.path.splitext(file.name)[0],
- embedding=subjects[0].embedding,
- face=subjects[0].face,
- )
- )
-
- # Show gallery images
- st.image(
- image=[identity.face for identity in gallery],
- caption=[identity.name for identity in gallery],
- )
-
-# Start streaming component
-with ctx_container:
- ctx = webrtc_streamer(
- key="LiveFaceRecognition",
- mode=WebRtcMode.SENDONLY,
- rtc_configuration={"iceServers": ICE_SERVERS},
- media_stream_constraints={"video": {"width": 1920}, "audio": False},
- )
-
-# Initialize frame grabber
-grabber = Grabber(ctx.video_receiver)
-
-if ctx.state.playing:
- # Start frame grabber in background thread
- grabber.thread.start()
- timestamp = time.time()
-
- # Start main loop
- while True:
- frame = grabber.get_frame()
- if frame is not None:
- # Print frame timestamp to streamlit
- info_container.write(f"Frame timestamp: {frame.time}")
-
- # Run face detection and recognition
- frame, matches = video_frame_callback(frame)
-
- # Convert frame to numpy array
- frame = frame.to_ndarray(format="rgb24")
-
- # Show Stream
- stream_container.image(frame, channels="RGB")
-
- # Show Matches
- if matches:
- matches_container.image(
- image=[match.subject_id.face for match in matches],
- caption=[match.gallery_id.name for match in matches],
- )
- else:
- matches_container.info("No matches found yet ...")
diff --git a/spaces/Masa-digital-art/planning-proposal-gpt-4/app.py b/spaces/Masa-digital-art/planning-proposal-gpt-4/app.py
deleted file mode 100644
index e1763e3b59980d4a5cb01b8ab414a52ae89e92e0..0000000000000000000000000000000000000000
--- a/spaces/Masa-digital-art/planning-proposal-gpt-4/app.py
+++ /dev/null
@@ -1,131 +0,0 @@
-import gradio as gr
-import openai
-import requests
-import os
-import fileinput
-from dotenv import load_dotenv
-
-title="企画のアイデア提案AI-gpt-4-β"
-inputs_label="""
-- 物語の企画の元になるアイデアやキーワード(登場人物とその設定、物語の舞台、出来事など)を入力してください。
-"""
-outputs_label="あらすじの提案"
-description="""
-- あなたが入力したアイデアやキーワードに応じて、AIが物語の企画を提案します。
-"""
-
-article = """
-
-"""
-
-load_dotenv()
-openai.api_key = os.getenv('OPENAI_API_KEY')
-MODEL = "gpt-4"
-
-def get_filetext(filename, cache={}):
- if filename in cache:
- # If the file is already cached, return its contents from the cache
- return cache[filename]
- else:
- if not os.path.exists(filename):
- raise ValueError(f"ファイル '{filename}' が見つかりませんでした")
- with open(filename, "r") as f:
- text = f.read()
- # Cache the file contents
- cache[filename] = text
- return text
-
-class OpenAI:
-
- @classmethod
- def chat_completion(cls, prompt, start_with=""):
- constraints = get_filetext(filename = "constraints.md")
- template = get_filetext(filename = "template.md")
-
- # Define the payload passed to the ChatCompletion API
- data = {
- "model": "gpt-4",
- "messages": [
- {"role": "system", "content": constraints}
- ,{"role": "system", "content": template}
- ,{"role": "assistant", "content": "Sure!"}
- ,{"role": "user", "content": prompt}
- ,{"role": "assistant", "content": start_with}
- ],
- }
-
- # Call the ChatCompletion API
- response = requests.post(
- "https://api.openai.com/v1/chat/completions",
- headers={
- "Content-Type": "application/json",
- "Authorization": f"Bearer {openai.api_key}"
- },
- json=data
- )
-
- # Extract the content from the ChatCompletion API response
- result = response.json()
- print(result)
- content = result["choices"][0]["message"]["content"].strip()
- return content
-
-class NajiminoAI:
-
- @classmethod
- def generate_emo_prompt(cls, user_message):
- template = get_filetext(filename="template.md")
- prompt = f"""
- {user_message}
- ---
- 上記を元に、下記テンプレートを日本語で埋めてください。
- ---
- {template}
- """
- return prompt
-
- @classmethod
- def generate_emo(cls, user_message):
- prompt = NajiminoAI.generate_emo_prompt(user_message)
- start_with = ""
- result = OpenAI.chat_completion(prompt=prompt, start_with=start_with)
- return result
-
-def main():
- iface = gr.Interface(fn=NajiminoAI.generate_emo,
- inputs=gr.Textbox(label=inputs_label),
- outputs=gr.Textbox(label=outputs_label),
- title=title,
- description=description,
- article=article,
- allow_flagging='never'
- )
-
- iface.launch()
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/spaces/MirageML/sjc/sd1/ldm/modules/losses/__init__.py b/spaces/MirageML/sjc/sd1/ldm/modules/losses/__init__.py
deleted file mode 100644
index 876d7c5bd6e3245ee77feb4c482b7a8143604ad5..0000000000000000000000000000000000000000
--- a/spaces/MirageML/sjc/sd1/ldm/modules/losses/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator
\ No newline at end of file
diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/__init__.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/renderer/gl/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/MrBodean/VoiceClone/README.md b/spaces/MrBodean/VoiceClone/README.md
deleted file mode 100644
index 54a1ff6c185f4a025bb31ff1ab4bc79eac1a1937..0000000000000000000000000000000000000000
--- a/spaces/MrBodean/VoiceClone/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Real Time Voice Cloning
-emoji: 📈
-colorFrom: blue
-colorTo: red
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/preprocessing.py b/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/preprocessing.py
deleted file mode 100644
index 3f2019189d4e5f9c269a67276531b4344ede7e32..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/vision/image_classification/preprocessing.py
+++ /dev/null
@@ -1,391 +0,0 @@
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Preprocessing functions for images."""
-
-from __future__ import absolute_import
-from __future__ import division
-# from __future__ import google_type_annotations
-from __future__ import print_function
-
-import tensorflow as tf
-from typing import List, Optional, Text, Tuple
-
-from official.vision.image_classification import augment
-
-
-# Calculated from the ImageNet training set
-MEAN_RGB = (0.485 * 255, 0.456 * 255, 0.406 * 255)
-STDDEV_RGB = (0.229 * 255, 0.224 * 255, 0.225 * 255)
-
-IMAGE_SIZE = 224
-CROP_PADDING = 32
-
-
-def mean_image_subtraction(
- image_bytes: tf.Tensor,
- means: Tuple[float, ...],
- num_channels: int = 3,
- dtype: tf.dtypes.DType = tf.float32,
-) -> tf.Tensor:
- """Subtracts the given means from each image channel.
-
- For example:
- means = [123.68, 116.779, 103.939]
- image_bytes = mean_image_subtraction(image_bytes, means)
-
- Note that the rank of `image` must be known.
-
- Args:
- image_bytes: a tensor of size [height, width, C].
- means: a C-vector of values to subtract from each channel.
- num_channels: number of color channels in the image that will be distorted.
- dtype: the dtype to convert the images to. Set to `None` to skip conversion.
-
- Returns:
- the centered image.
-
- Raises:
- ValueError: If the rank of `image` is unknown, if `image` has a rank other
- than three or if the number of channels in `image` doesn't match the
- number of values in `means`.
- """
- if image_bytes.get_shape().ndims != 3:
- raise ValueError('Input must be of size [height, width, C>0]')
-
- if len(means) != num_channels:
- raise ValueError('len(means) must match the number of channels')
-
- # We have a 1-D tensor of means; convert to 3-D.
- # Note(b/130245863): we explicitly call `broadcast` instead of simply
- # expanding dimensions for better performance.
- means = tf.broadcast_to(means, tf.shape(image_bytes))
- if dtype is not None:
- means = tf.cast(means, dtype=dtype)
-
- return image_bytes - means
-
-
-def standardize_image(
- image_bytes: tf.Tensor,
- stddev: Tuple[float, ...],
- num_channels: int = 3,
- dtype: tf.dtypes.DType = tf.float32,
-) -> tf.Tensor:
- """Divides the given stddev from each image channel.
-
- For example:
- stddev = [123.68, 116.779, 103.939]
- image_bytes = standardize_image(image_bytes, stddev)
-
- Note that the rank of `image` must be known.
-
- Args:
- image_bytes: a tensor of size [height, width, C].
- stddev: a C-vector of values to divide from each channel.
- num_channels: number of color channels in the image that will be distorted.
- dtype: the dtype to convert the images to. Set to `None` to skip conversion.
-
- Returns:
- the centered image.
-
- Raises:
- ValueError: If the rank of `image` is unknown, if `image` has a rank other
- than three or if the number of channels in `image` doesn't match the
- number of values in `stddev`.
- """
- if image_bytes.get_shape().ndims != 3:
- raise ValueError('Input must be of size [height, width, C>0]')
-
- if len(stddev) != num_channels:
- raise ValueError('len(stddev) must match the number of channels')
-
- # We have a 1-D tensor of stddev; convert to 3-D.
- # Note(b/130245863): we explicitly call `broadcast` instead of simply
- # expanding dimensions for better performance.
- stddev = tf.broadcast_to(stddev, tf.shape(image_bytes))
- if dtype is not None:
- stddev = tf.cast(stddev, dtype=dtype)
-
- return image_bytes / stddev
-
-
-def normalize_images(features: tf.Tensor,
- mean_rgb: Tuple[float, ...] = MEAN_RGB,
- stddev_rgb: Tuple[float, ...] = STDDEV_RGB,
- num_channels: int = 3,
- dtype: tf.dtypes.DType = tf.float32,
- data_format: Text = 'channels_last') -> tf.Tensor:
- """Normalizes the input image channels with the given mean and stddev.
-
- Args:
- features: `Tensor` representing decoded images in float format.
- mean_rgb: the mean of the channels to subtract.
- stddev_rgb: the stddev of the channels to divide.
- num_channels: the number of channels in the input image tensor.
- dtype: the dtype to convert the images to. Set to `None` to skip conversion.
- data_format: the format of the input image tensor
- ['channels_first', 'channels_last'].
-
- Returns:
- A normalized image `Tensor`.
- """
- # TODO(allencwang) - figure out how to use mean_image_subtraction and
- # standardize_image on batches of images and replace the following.
- if data_format == 'channels_first':
- stats_shape = [num_channels, 1, 1]
- else:
- stats_shape = [1, 1, num_channels]
-
- if dtype is not None:
- features = tf.image.convert_image_dtype(features, dtype=dtype)
-
- if mean_rgb is not None:
- mean_rgb = tf.constant(mean_rgb,
- shape=stats_shape,
- dtype=features.dtype)
- mean_rgb = tf.broadcast_to(mean_rgb, tf.shape(features))
- features = features - mean_rgb
-
- if stddev_rgb is not None:
- stddev_rgb = tf.constant(stddev_rgb,
- shape=stats_shape,
- dtype=features.dtype)
- stddev_rgb = tf.broadcast_to(stddev_rgb, tf.shape(features))
- features = features / stddev_rgb
-
- return features
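-
-
-# Illustrative check of the normalization above, assuming float inputs on
-# the [0, 255] scale: a red-channel value of 128 maps to
-# (128 - 0.485 * 255) / (0.229 * 255) = (128 - 123.675) / 58.395 ~ 0.074,
-# i.e. features come out approximately zero-centred with unit variance.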
-
-
-def decode_and_center_crop(image_bytes: tf.Tensor,
- image_size: int = IMAGE_SIZE,
- crop_padding: int = CROP_PADDING) -> tf.Tensor:
- """Crops to center of image with padding then scales image_size.
-
- Args:
- image_bytes: `Tensor` representing an image binary of arbitrary size.
- image_size: image height/width dimension.
- crop_padding: the padding size to use when centering the crop.
-
- Returns:
- A decoded and cropped image `Tensor`.
- """
- decoded = image_bytes.dtype != tf.string
- shape = (tf.shape(image_bytes) if decoded
- else tf.image.extract_jpeg_shape(image_bytes))
- image_height = shape[0]
- image_width = shape[1]
-
- padded_center_crop_size = tf.cast(
- ((image_size / (image_size + crop_padding)) *
- tf.cast(tf.minimum(image_height, image_width), tf.float32)),
- tf.int32)
-
- offset_height = ((image_height - padded_center_crop_size) + 1) // 2
- offset_width = ((image_width - padded_center_crop_size) + 1) // 2
- crop_window = tf.stack([offset_height, offset_width,
- padded_center_crop_size, padded_center_crop_size])
- if decoded:
- image = tf.image.crop_to_bounding_box(
- image_bytes,
- offset_height=offset_height,
- offset_width=offset_width,
- target_height=padded_center_crop_size,
- target_width=padded_center_crop_size)
- else:
- image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
-
- image = resize_image(image_bytes=image,
- height=image_size,
- width=image_size)
-
- return image
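-
-
-# Worked example of the crop arithmetic above: with image_size=224 and
-# crop_padding=32 the crop covers 224 / (224 + 32) = 0.875 of the shorter
-# side, so a 300x400 image yields a 262x262 centre crop
-# (int(0.875 * 300) = 262), which is then resized to 224x224.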
-
-
-def decode_crop_and_flip(image_bytes: tf.Tensor) -> tf.Tensor:
- """Crops an image to a random part of the image, then randomly flips.
-
- Args:
- image_bytes: `Tensor` representing an image binary of arbitrary size.
-
- Returns:
- A decoded and cropped image `Tensor`.
-
- """
- decoded = image_bytes.dtype != tf.string
- bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
- shape = (tf.shape(image_bytes) if decoded
- else tf.image.extract_jpeg_shape(image_bytes))
- sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
- shape,
- bounding_boxes=bbox,
- min_object_covered=0.1,
- aspect_ratio_range=[0.75, 1.33],
- area_range=[0.05, 1.0],
- max_attempts=100,
- use_image_if_no_bounding_boxes=True)
- bbox_begin, bbox_size, _ = sample_distorted_bounding_box
-
- # Reassemble the bounding box in the format the crop op requires.
- offset_height, offset_width, _ = tf.unstack(bbox_begin)
- target_height, target_width, _ = tf.unstack(bbox_size)
- crop_window = tf.stack([offset_height, offset_width,
- target_height, target_width])
- if decoded:
- cropped = tf.image.crop_to_bounding_box(
- image_bytes,
- offset_height=offset_height,
- offset_width=offset_width,
- target_height=target_height,
- target_width=target_width)
- else:
- cropped = tf.image.decode_and_crop_jpeg(image_bytes,
- crop_window,
- channels=3)
-
- # Flip to add a little more random distortion in.
- cropped = tf.image.random_flip_left_right(cropped)
- return cropped
-
-
-def resize_image(image_bytes: tf.Tensor,
- height: int = IMAGE_SIZE,
- width: int = IMAGE_SIZE) -> tf.Tensor:
- """Resizes an image to a given height and width.
-
- Args:
- image_bytes: `Tensor` representing an image binary of arbitrary size.
- height: image height dimension.
- width: image width dimension.
-
- Returns:
- A tensor containing the resized image.
-
- """
- return tf.compat.v1.image.resize(
- image_bytes, [height, width], method=tf.image.ResizeMethod.BILINEAR,
- align_corners=False)
-
-
-def preprocess_for_eval(
- image_bytes: tf.Tensor,
- image_size: int = IMAGE_SIZE,
- num_channels: int = 3,
- mean_subtract: bool = False,
- standardize: bool = False,
- dtype: tf.dtypes.DType = tf.float32
-) -> tf.Tensor:
- """Preprocesses the given image for evaluation.
-
- Args:
- image_bytes: `Tensor` representing an image binary of arbitrary size.
- image_size: image height/width dimension.
- num_channels: number of image input channels.
- mean_subtract: whether or not to apply mean subtraction.
- standardize: whether or not to apply standardization.
- dtype: the dtype to convert the images to. Set to `None` to skip conversion.
-
- Returns:
- A preprocessed and normalized image `Tensor`.
- """
- images = decode_and_center_crop(image_bytes, image_size)
- images = tf.reshape(images, [image_size, image_size, num_channels])
-
- if mean_subtract:
- images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
- if standardize:
- images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
- if dtype is not None:
- images = tf.image.convert_image_dtype(images, dtype=dtype)
-
- return images
-
-
-def load_eval_image(filename: Text, image_size: int = IMAGE_SIZE) -> tf.Tensor:
- """Reads an image from the filesystem and applies image preprocessing.
-
- Args:
- filename: a filename path of an image.
- image_size: image height/width dimension.
-
- Returns:
- A preprocessed and normalized image `Tensor`.
- """
- image_bytes = tf.io.read_file(filename)
- image = preprocess_for_eval(image_bytes, image_size)
-
- return image
-
-
-def build_eval_dataset(filenames: List[Text],
- labels: Optional[List[int]] = None,
- image_size: int = IMAGE_SIZE,
- batch_size: int = 1) -> tf.data.Dataset:
- """Builds a tf.data.Dataset from a list of filenames and labels.
-
- Args:
- filenames: a list of filename paths of images.
- labels: a list of labels corresponding to each image.
- image_size: image height/width dimension.
- batch_size: the batch size used by the dataset
-
- Returns:
- A preprocessed and normalized image `Tensor`.
- """
- if labels is None:
- labels = [0] * len(filenames)
-
- filenames = tf.constant(filenames)
- labels = tf.constant(labels)
- dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
-
- dataset = dataset.map(
- lambda filename, label: (load_eval_image(filename, image_size), label))
- dataset = dataset.batch(batch_size)
-
- return dataset
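-
-
-# Minimal usage sketch (file paths hypothetical):
-#
-# dataset = build_eval_dataset(['/tmp/img0.jpg', '/tmp/img1.jpg'],
-# labels=[3, 7], batch_size=2)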
-
-
-def preprocess_for_train(image_bytes: tf.Tensor,
- image_size: int = IMAGE_SIZE,
- augmenter: Optional[augment.ImageAugment] = None,
- mean_subtract: bool = False,
- standardize: bool = False,
- dtype: tf.dtypes.DType = tf.float32) -> tf.Tensor:
- """Preprocesses the given image for training.
-
- Args:
- image_bytes: `Tensor` representing an image binary of
- arbitrary size of dtype tf.uint8.
- image_size: image height/width dimension.
- augmenter: the image augmenter to apply.
- mean_subtract: whether or not to apply mean subtraction.
- standardize: whether or not to apply standardization.
- dtype: the dtype to convert the images to. Set to `None` to skip conversion.
-
- Returns:
- A preprocessed and normalized image `Tensor`.
- """
- images = decode_crop_and_flip(image_bytes=image_bytes)
- images = resize_image(images, height=image_size, width=image_size)
- if mean_subtract:
- images = mean_image_subtraction(image_bytes=images, means=MEAN_RGB)
- if standardize:
- images = standardize_image(image_bytes=images, stddev=STDDEV_RGB)
- if augmenter is not None:
- images = augmenter.distort(images)
- if dtype is not None:
- images = tf.image.convert_image_dtype(images, dtype)
-
- return images
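-
-
-# Minimal usage sketch of the two entry points above (path hypothetical):
-#
-# image_bytes = tf.io.read_file('/tmp/example.jpg')
-# train_image = preprocess_for_train(image_bytes, image_size=224,
-# mean_subtract=True, standardize=True)
-# eval_image = preprocess_for_eval(image_bytes, image_size=224,
-# mean_subtract=True, standardize=True)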
diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/tfcode/tf_utils.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/tfcode/tf_utils.py
deleted file mode 100644
index 5f96d8ff5ce7473f0ec49096abcbac274e6c4fcc..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/cognitive_mapping_and_planning/tfcode/tf_utils.py
+++ /dev/null
@@ -1,840 +0,0 @@
-# Copyright 2016 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-import numpy as np
-import sys
-import tensorflow as tf
-import src.utils as utils
-import logging
-from tensorflow.contrib import slim
-from tensorflow.contrib.metrics.python.ops import confusion_matrix_ops
-from tensorflow.contrib.slim import arg_scope
-from tensorflow.contrib.slim.nets import resnet_v2
-from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import check_ops
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import variable_scope
-sys.path.insert(0, '../slim')
-from preprocessing import inception_preprocessing as ip
-
-resnet_v2_50 = resnet_v2.resnet_v2_50
-
-
-def custom_residual_block(x, neurons, kernel_size, stride, name, is_training,
- wt_decay=0.0001, use_residual=True,
- residual_stride_conv=True, conv_fn=slim.conv2d,
- batch_norm_param=None):
-
- # batch norm x and relu
- init_var = np.sqrt(2.0/(kernel_size**2)/neurons)
- with arg_scope([conv_fn],
- weights_regularizer=slim.l2_regularizer(wt_decay),
- weights_initializer=tf.random_normal_initializer(stddev=init_var),
- biases_initializer=tf.zeros_initializer()):
-
- if batch_norm_param is None:
- batch_norm_param = {'center': True, 'scale': False,
- 'activation_fn':tf.nn.relu,
- 'is_training': is_training}
-
- y = slim.batch_norm(x, scope=name+'_bn', **batch_norm_param)
-
- y = conv_fn(y, num_outputs=neurons, kernel_size=kernel_size, stride=stride,
- activation_fn=None, scope=name+'_1',
- normalizer_fn=slim.batch_norm,
- normalizer_params=batch_norm_param)
-
- y = conv_fn(y, num_outputs=neurons, kernel_size=kernel_size,
- stride=1, activation_fn=None, scope=name+'_2')
-
- if use_residual:
- if stride != 1 or x.get_shape().as_list()[-1] != neurons:
- batch_norm_param_ = dict(batch_norm_param)
- batch_norm_param_['activation_fn'] = None
- x = conv_fn(x, num_outputs=neurons, kernel_size=1,
- stride=stride if residual_stride_conv else 1,
- activation_fn=None, scope=name+'_0_1x1',
- normalizer_fn=slim.batch_norm,
- normalizer_params=batch_norm_param_)
- if not residual_stride_conv:
- x = slim.avg_pool2d(x, 1, stride=stride, scope=name+'_0_avg')
-
- y = tf.add(x, y, name=name+'_add')
-
- return y
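-
-# Note: `init_var` above is a He-style initialization with the fan taken as
-# kernel_size**2 * neurons; e.g. kernel_size=3 and neurons=64 give
-# sqrt(2 / 576) ~ 0.059.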
-
-def step_gt_prob(step, step_number_op):
- # Change the sampling probability from 1 to -1 after `step` steps.
- with tf.name_scope('step_gt_prob'):
- out = tf.cond(tf.less(step_number_op, step),
- lambda: tf.constant(1.), lambda: tf.constant(-1.))
- return out
-
-def inverse_sigmoid_decay(k, global_step_op):
- with tf.name_scope('inverse_sigmoid_decay'):
- k = tf.constant(k, dtype=tf.float32)
- tmp = k*tf.exp(-tf.cast(global_step_op, tf.float32)/k)
- tmp = tmp / (1. + tmp)
- return tmp
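-
-# Numeric check of the decay above: with k=100 the value starts at
-# 100 / 101 ~ 0.99 at step 0 and crosses 0.5 when k * exp(-step / k) = 1,
-# i.e. at step = k * ln(k) ~ 460.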
-
-def dense_resample(im, flow_im, output_valid_mask, name='dense_resample'):
- """ Resample reward at particular locations.
- Args:
- im: ...xHxWxC matrix to sample from.
- flow_im: ...xHxWx2 matrix, samples the image using absolute offsets as given
- by the flow_im.
- """
- with tf.name_scope(name):
- valid_mask = None
-
- x, y = tf.unstack(flow_im, axis=-1)
- x = tf.cast(tf.reshape(x, [-1]), tf.float32)
- y = tf.cast(tf.reshape(y, [-1]), tf.float32)
-
- # constants
- shape = tf.unstack(tf.shape(im))
- channels = shape[-1]
- width = shape[-2]
- height = shape[-3]
- num_batch = tf.cast(tf.reduce_prod(tf.stack(shape[:-3])), 'int32')
- zero = tf.constant(0, dtype=tf.int32)
-
- # Round up and down.
- x0 = tf.cast(tf.floor(x), 'int32')
- x1 = x0 + 1
- y0 = tf.cast(tf.floor(y), 'int32')
- y1 = y0 + 1
-
- if output_valid_mask:
- valid_mask = tf.logical_and(
- tf.logical_and(tf.less_equal(x, tf.cast(width, tf.float32)-1.), tf.greater_equal(x, 0.)),
- tf.logical_and(tf.less_equal(y, tf.cast(height, tf.float32)-1.), tf.greater_equal(y, 0.)))
- valid_mask = tf.reshape(valid_mask, shape=shape[:-1] + [1])
-
- x0 = tf.clip_by_value(x0, zero, width-1)
- x1 = tf.clip_by_value(x1, zero, width-1)
- y0 = tf.clip_by_value(y0, zero, height-1)
- y1 = tf.clip_by_value(y1, zero, height-1)
-
- dim2 = width
- dim1 = width * height
-
- # Create base index
- base = tf.reshape(tf.range(num_batch) * dim1, shape=[-1,1])
- base = tf.reshape(tf.tile(base, [1, height*width]), shape=[-1])
-
- base_y0 = base + y0 * dim2
- base_y1 = base + y1 * dim2
- idx_a = base_y0 + x0
- idx_b = base_y1 + x0
- idx_c = base_y0 + x1
- idx_d = base_y1 + x1
-
- # use indices to lookup pixels in the flat image and restore channels dim
- sh = tf.stack([tf.constant(-1,dtype=tf.int32), channels])
- im_flat = tf.cast(tf.reshape(im, sh), dtype=tf.float32)
- pixel_a = tf.gather(im_flat, idx_a)
- pixel_b = tf.gather(im_flat, idx_b)
- pixel_c = tf.gather(im_flat, idx_c)
- pixel_d = tf.gather(im_flat, idx_d)
-
- # and finally calculate interpolated values
- x1_f = tf.to_float(x1)
- y1_f = tf.to_float(y1)
-
- wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
- wb = tf.expand_dims((x1_f - x) * (1.0 - (y1_f - y)), 1)
- wc = tf.expand_dims(((1.0 - (x1_f - x)) * (y1_f - y)), 1)
- wd = tf.expand_dims(((1.0 - (x1_f - x)) * (1.0 - (y1_f - y))), 1)
-
- output = tf.add_n([wa * pixel_a, wb * pixel_b, wc * pixel_c, wd * pixel_d])
- output = tf.reshape(output, shape=tf.shape(im))
- return output, valid_mask
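-
-# Sanity check of the bilinear weights above (illustrative sample point):
-# for (x, y) = (1.25, 2.5) the corners are x0=1, x1=2, y0=2, y1=3, and
-# wa = (2 - 1.25) * (3 - 2.5) = 0.375
-# wb = (2 - 1.25) * (1 - (3 - 2.5)) = 0.375
-# wc = (1 - (2 - 1.25)) * (3 - 2.5) = 0.125
-# wd = (1 - (2 - 1.25)) * (1 - (3 - 2.5)) = 0.125
-# which sum to 1.0, as bilinear weights must.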
-
-def get_flow(t, theta, map_size, name_scope='gen_flow'):
- """
- Rotates the map by theta and translates the rotated map by t.
-
- Assume that the robot rotates by an angle theta and then moves forward by
- translation t. This function returns the resulting flow field. For every
- pixel in the new image it tells us which pixel in the original image it
- came from: NewI(x, y) = OldI(flow_x(x,y), flow_y(x,y)).
-
- Assume there is a point p in the original image. The robot rotates by R
- and moves forward by t, so p1 = Rt*p and p2 = p1 - t (the world moves in
- the opposite direction). Thus p2 = Rt*p - t, i.e. p2 came from R*(p2 + t),
- which is what this function calculates.
-
- t: ... x 2 (translation for B batches of N motions each).
- theta: ... x 1 (rotation for B batches of N motions each).
-
- Output: ... x map_size x map_size x 2
- """
-
- with tf.name_scope(name_scope):
- tx, ty = tf.unstack(tf.reshape(t, shape=[-1, 1, 1, 1, 2]), axis=4)
- theta = tf.reshape(theta, shape=[-1, 1, 1, 1])
- c = tf.constant((map_size-1.)/2., dtype=tf.float32)
-
- x, y = np.meshgrid(np.arange(map_size), np.arange(map_size))
- x = tf.constant(x[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='x',
- shape=[1, map_size, map_size, 1])
- y = tf.constant(y[np.newaxis, :, :, np.newaxis], dtype=tf.float32, name='y',
- shape=[1,map_size, map_size, 1])
-
- x = x-(-tx+c)
- y = y-(-ty+c)
-
- sin_theta = tf.sin(theta)
- cos_theta = tf.cos(theta)
- xr = cos_theta*x - sin_theta*y
- yr = sin_theta*x + cos_theta*y
-
- xr = xr + c
- yr = yr + c
-
- flow = tf.stack([xr, yr], axis=-1)
- sh = tf.unstack(tf.shape(t), axis=0)
- sh = tf.stack(sh[:-1]+[tf.constant(_, dtype=tf.int32) for _ in [map_size, map_size, 2]])
- flow = tf.reshape(flow, shape=sh)
- return flow
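-
-# Quick check of the flow above: with theta = 0 the rotation drops out and
-# the centre offset c cancels, leaving NewI(x, y) = OldI(x + tx, y + ty),
-# i.e. a pure translation of the sampling grid by t.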
-
-def distort_image(im, fast_mode=False):
- # All images in the same batch are transformed the same way, but over
- # iterations you see different distortions.
- # im should be float with values between 0 and 1.
- im_ = tf.reshape(im, shape=(-1,1,3))
- im_ = ip.apply_with_random_selector(
- im_, lambda x, ordering: ip.distort_color(x, ordering, fast_mode),
- num_cases=4)
- im_ = tf.reshape(im_, tf.shape(im))
- return im_
-
-def fc_network(x, neurons, wt_decay, name, num_pred=None, offset=0,
- batch_norm_param=None, dropout_ratio=0.0, is_training=None):
- if dropout_ratio > 0:
- assert(is_training is not None), \
- 'is_training needs to be defined when training with dropout.'
-
- repr = []
- for i, neuron in enumerate(neurons):
- init_var = np.sqrt(2.0/neuron)
- if batch_norm_param is not None:
- x = slim.fully_connected(x, neuron, activation_fn=None,
- weights_initializer=tf.random_normal_initializer(stddev=init_var),
- weights_regularizer=slim.l2_regularizer(wt_decay),
- normalizer_fn=slim.batch_norm,
- normalizer_params=batch_norm_param,
- biases_initializer=tf.zeros_initializer(),
- scope='{:s}_{:d}'.format(name, offset+i))
- else:
- x = slim.fully_connected(x, neuron, activation_fn=tf.nn.relu,
- weights_initializer=tf.random_normal_initializer(stddev=init_var),
- weights_regularizer=slim.l2_regularizer(wt_decay),
- biases_initializer=tf.zeros_initializer(),
- scope='{:s}_{:d}'.format(name, offset+i))
- if dropout_ratio > 0:
- x = slim.dropout(x, keep_prob=1-dropout_ratio, is_training=is_training,
- scope='{:s}_{:d}'.format('dropout_'+name, offset+i))
- repr.append(x)
-
- if num_pred is not None:
- init_var = np.sqrt(2.0/num_pred)
- x = slim.fully_connected(x, num_pred,
- weights_regularizer=slim.l2_regularizer(wt_decay),
- weights_initializer=tf.random_normal_initializer(stddev=init_var),
- biases_initializer=tf.zeros_initializer(),
- activation_fn=None,
- scope='{:s}_pred'.format(name))
- return x, repr
-
-def concat_state_x_list(f, names):
- af = {}
- for i, k in enumerate(names):
- af[k] = np.concatenate([x[i] for x in f], axis=1)
- return af
-
-def concat_state_x(f, names):
- af = {}
- for k in names:
- af[k] = np.concatenate([x[k] for x in f], axis=1)
- # af[k] = np.swapaxes(af[k], 0, 1)
- return af
-
-def sample_action(rng, action_probs, optimal_action, sample_gt_prob,
- type='sample', combine_type='one_or_other'):
- optimal_action_ = optimal_action/np.sum(optimal_action+0., 1, keepdims=True)
- action_probs_ = action_probs/np.sum(action_probs+0.001, 1, keepdims=True)
- batch_size = action_probs_.shape[0]
-
- action = np.zeros((batch_size), dtype=np.int32)
- action_sample_wt = np.zeros((batch_size), dtype=np.float32)
- if combine_type == 'add':
- sample_gt_prob_ = np.minimum(np.maximum(sample_gt_prob, 0.), 1.)
-
- for i in range(batch_size):
- if combine_type == 'one_or_other':
- sample_gt = rng.rand() < sample_gt_prob
- if sample_gt: distr_ = optimal_action_[i,:]*1.
- else: distr_ = action_probs_[i,:]*1.
- elif combine_type == 'add':
- distr_ = optimal_action_[i,:]*sample_gt_prob_ + \
- (1.-sample_gt_prob_)*action_probs_[i,:]
- distr_ = distr_ / np.sum(distr_)
-
- if type == 'sample':
- action[i] = np.argmax(rng.multinomial(1, distr_, size=1))
- elif type == 'argmax':
- action[i] = np.argmax(distr_)
- action_sample_wt[i] = action_probs_[i, action[i]] / distr_[action[i]]
- return action, action_sample_wt
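-
-# Illustrative 'add' combination (ignoring the small smoothing constants):
-# optimal = [1, 0], action_probs = [0.5, 0.5] and sample_gt_prob = 0.5 give
-# distr = 0.5 * [1, 0] + 0.5 * [0.5, 0.5] = [0.75, 0.25].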
-
-def train_step_custom_online_sampling(sess, train_op, global_step,
- train_step_kwargs, mode='train'):
- m = train_step_kwargs['m']
- obj = train_step_kwargs['obj']
- rng_data = train_step_kwargs['rng_data']
- rng_action = train_step_kwargs['rng_action']
- writer = train_step_kwargs['writer']
- iters = train_step_kwargs['iters']
- num_steps = train_step_kwargs['num_steps']
- logdir = train_step_kwargs['logdir']
- dagger_sample_bn_false = train_step_kwargs['dagger_sample_bn_false']
- train_display_interval = train_step_kwargs['train_display_interval']
- if 'outputs' not in m.train_ops:
- m.train_ops['outputs'] = []
-
- s_ops = m.summary_ops[mode]
- val_additional_ops = []
-
- # Print all variables here.
- if False:
- v = tf.get_collection(tf.GraphKeys.VARIABLES)
- v_op = [_.value() for _ in v]
- v_op_value = sess.run(v_op)
-
- filter = lambda x, y: 'Adam' in x.name
- # filter = lambda x, y: np.is_any_nan(y)
- ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)]
- v = [v[i] for i in ind]
- v_op_value = [v_op_value[i] for i in ind]
-
- for i in range(len(v)):
- logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.',
- v[i].name, np.any(np.isnan(v_op_value[i])),
- np.linalg.norm(v_op_value[i]))
-
- tt = utils.Timer()
- for i in range(iters):
- tt.tic()
- # Sample a room.
- e = obj.sample_env(rng_data)
-
- # Initialize the agent.
- init_env_state = e.reset(rng_data)
-
- # Get and process the common data.
- input = e.get_common_data()
- input = e.pre_common_data(input)
- feed_dict = prepare_feed_dict(m.input_tensors['common'], input)
- if dagger_sample_bn_false:
- feed_dict[m.train_ops['batch_norm_is_training_op']] = False
- common_data = sess.run(m.train_ops['common'], feed_dict=feed_dict)
-
- states = []
- state_features = []
- state_targets = []
- net_state_to_input = []
- step_data_cache = []
- executed_actions = []
- rewards = []
- action_sample_wts = []
- states.append(init_env_state)
-
- net_state = sess.run(m.train_ops['init_state'], feed_dict=feed_dict)
- net_state = dict(zip(m.train_ops['state_names'], net_state))
- net_state_to_input.append(net_state)
- for j in range(num_steps):
- f = e.get_features(states[j], j)
- f = e.pre_features(f)
- f.update(net_state)
- f['step_number'] = np.ones((1,1,1), dtype=np.int32)*j
- state_features.append(f)
-
- feed_dict = prepare_feed_dict(m.input_tensors['step'], state_features[-1])
- optimal_action = e.get_optimal_action(states[j], j)
- for x, v in zip(m.train_ops['common'], common_data):
- feed_dict[x] = v
- if dagger_sample_bn_false:
- feed_dict[m.train_ops['batch_norm_is_training_op']] = False
- outs = sess.run([m.train_ops['step'], m.sample_gt_prob_op,
- m.train_ops['step_data_cache'],
- m.train_ops['updated_state'],
- m.train_ops['outputs']], feed_dict=feed_dict)
- action_probs = outs[0]
- sample_gt_prob = outs[1]
- step_data_cache.append(dict(zip(m.train_ops['step_data_cache'], outs[2])))
- net_state = outs[3]
- if hasattr(e, 'update_state'):
- outputs = outs[4]
- outputs = dict(zip(m.train_ops['output_names'], outputs))
- e.update_state(outputs, j)
- state_targets.append(e.get_targets(states[j], j))
-
- if j < num_steps-1:
- # Sample from action_probs and optimal action.
- action, action_sample_wt = sample_action(
- rng_action, action_probs, optimal_action, sample_gt_prob,
- m.sample_action_type, m.sample_action_combine_type)
- next_state, reward = e.take_action(states[j], action, j)
- executed_actions.append(action)
- states.append(next_state)
- rewards.append(reward)
- action_sample_wts.append(action_sample_wt)
- net_state = dict(zip(m.train_ops['state_names'], net_state))
- net_state_to_input.append(net_state)
-
- # Concatenate things together for training.
- rewards = np.array(rewards).T
- action_sample_wts = np.array(action_sample_wts).T
- executed_actions = np.array(executed_actions).T
- all_state_targets = concat_state_x(state_targets, e.get_targets_name())
- all_state_features = concat_state_x(state_features,
- e.get_features_name()+['step_number'])
- # all_state_net = concat_state_x(net_state_to_input,
- # m.train_ops['state_names'])
- all_step_data_cache = concat_state_x(step_data_cache,
- m.train_ops['step_data_cache'])
-
- dict_train = dict(input)
- dict_train.update(all_state_features)
- dict_train.update(all_state_targets)
- # dict_train.update(all_state_net)
- dict_train.update(net_state_to_input[0])
- dict_train.update(all_step_data_cache)
- dict_train.update({'rewards': rewards,
- 'action_sample_wts': action_sample_wts,
- 'executed_actions': executed_actions})
- feed_dict = prepare_feed_dict(m.input_tensors['train'], dict_train)
- for x in m.train_ops['step_data_cache']:
- feed_dict[x] = all_step_data_cache[x]
- if mode == 'train':
- n_step = sess.run(global_step)
-
- if np.mod(n_step, train_display_interval) == 0:
- total_loss, np_global_step, summary, print_summary = sess.run(
- [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops],
- feed_dict=feed_dict)
- logging.error("")
- else:
- total_loss, np_global_step, summary = sess.run(
- [train_op, global_step, s_ops.summary_ops], feed_dict=feed_dict)
-
- if writer is not None and summary is not None:
- writer.add_summary(summary, np_global_step)
-
- should_stop = sess.run(m.should_stop_op)
-
- if mode != 'train':
- arop = [[] for j in range(len(s_ops.additional_return_ops))]
- for j in range(len(s_ops.additional_return_ops)):
- if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]:
- arop[j] = s_ops.additional_return_ops[j]
- val = sess.run(arop, feed_dict=feed_dict)
- val_additional_ops.append(val)
- tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters),
- type='time')
-
- if mode != 'train':
- # Write the default val summaries.
- summary, print_summary, np_global_step = sess.run(
- [s_ops.summary_ops, s_ops.print_summary_ops, global_step])
- if writer is not None and summary is not None:
- writer.add_summary(summary, np_global_step)
-
- # write custom validation ops
- val_summarys = []
- val_additional_ops = zip(*val_additional_ops)
- if len(s_ops.arop_eval_fns) > 0:
- val_metric_summary = tf.summary.Summary()
- for i in range(len(s_ops.arop_eval_fns)):
- val_summary = None
- if s_ops.arop_eval_fns[i] is not None:
- val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i],
- np_global_step, logdir,
- val_metric_summary,
- s_ops.arop_summary_iters[i])
- val_summarys.append(val_summary)
- if writer is not None:
- writer.add_summary(val_metric_summary, np_global_step)
-
- # Return the additional val_ops
- total_loss = (val_additional_ops, val_summarys)
- should_stop = None
-
- return total_loss, should_stop
-
-def train_step_custom_v2(sess, train_op, global_step, train_step_kwargs,
- mode='train'):
- m = train_step_kwargs['m']
- obj = train_step_kwargs['obj']
- rng = train_step_kwargs['rng']
- writer = train_step_kwargs['writer']
- iters = train_step_kwargs['iters']
- logdir = train_step_kwargs['logdir']
- train_display_interval = train_step_kwargs['train_display_interval']
-
- s_ops = m.summary_ops[mode]
- val_additional_ops = []
-
- # Print all variables here.
- if False:
- v = tf.get_collection(tf.GraphKeys.VARIABLES)
- v_op = [_.value() for _ in v]
- v_op_value = sess.run(v_op)
-
- filter = lambda x, y: 'Adam' in x.name
- # filter = lambda x, y: np.is_any_nan(y)
- ind = [i for i, (_, __) in enumerate(zip(v, v_op_value)) if filter(_, __)]
- v = [v[i] for i in ind]
- v_op_value = [v_op_value[i] for i in ind]
-
- for i in range(len(v)):
- logging.info('XXXX: variable: %30s, is_any_nan: %5s, norm: %f.',
- v[i].name, np.any(np.isnan(v_op_value[i])),
- np.linalg.norm(v_op_value[i]))
-
- tt = utils.Timer()
- for i in range(iters):
- tt.tic()
- e = obj.sample_env(rng)
- rngs = e.gen_rng(rng)
- input_data = e.gen_data(*rngs)
- input_data = e.pre_data(input_data)
- feed_dict = prepare_feed_dict(m.input_tensors, input_data)
-
- if mode == 'train':
- n_step = sess.run(global_step)
-
- if np.mod(n_step, train_display_interval) == 0:
- total_loss, np_global_step, summary, print_summary = sess.run(
- [train_op, global_step, s_ops.summary_ops, s_ops.print_summary_ops],
- feed_dict=feed_dict)
- else:
- total_loss, np_global_step, summary = sess.run(
- [train_op, global_step, s_ops.summary_ops],
- feed_dict=feed_dict)
-
- if writer is not None and summary is not None:
- writer.add_summary(summary, np_global_step)
-
- should_stop = sess.run(m.should_stop_op)
-
- if mode != 'train':
- arop = [[] for j in range(len(s_ops.additional_return_ops))]
- for j in range(len(s_ops.additional_return_ops)):
- if s_ops.arop_summary_iters[j] < 0 or i < s_ops.arop_summary_iters[j]:
- arop[j] = s_ops.additional_return_ops[j]
- val = sess.run(arop, feed_dict=feed_dict)
- val_additional_ops.append(val)
- tt.toc(log_at=60, log_str='val timer {:d} / {:d}: '.format(i, iters),
- type='time')
-
- if mode != 'train':
- # Write the default val summaries.
- summary, print_summary, np_global_step = sess.run(
- [s_ops.summary_ops, s_ops.print_summary_ops, global_step])
- if writer is not None and summary is not None:
- writer.add_summary(summary, np_global_step)
-
- # write custom validation ops
- val_summarys = []
- val_additional_ops = zip(*val_additional_ops)
- if len(s_ops.arop_eval_fns) > 0:
- val_metric_summary = tf.summary.Summary()
- for i in range(len(s_ops.arop_eval_fns)):
- val_summary = None
- if s_ops.arop_eval_fns[i] is not None:
- val_summary = s_ops.arop_eval_fns[i](val_additional_ops[i],
- np_global_step, logdir,
- val_metric_summary,
- s_ops.arop_summary_iters[i])
- val_summarys.append(val_summary)
- if writer is not None:
- writer.add_summary(val_metric_summary, np_global_step)
-
- # Return the additional val_ops
- total_loss = (val_additional_ops, val_summarys)
- should_stop = None
-
- return total_loss, should_stop
-
-def train_step_custom(sess, train_op, global_step, train_step_kwargs,
- mode='train'):
- m = train_step_kwargs['m']
- params = train_step_kwargs['params']
- rng = train_step_kwargs['rng']
- writer = train_step_kwargs['writer']
- iters = train_step_kwargs['iters']
- gen_rng = train_step_kwargs['gen_rng']
- logdir = train_step_kwargs['logdir']
- gen_data = train_step_kwargs['gen_data']
- pre_data = train_step_kwargs['pre_data']
- train_display_interval = train_step_kwargs['train_display_interval']
-
- val_additional_ops = []
- # Print all variables here.
- if False:
- v = tf.get_collection(tf.GraphKeys.VARIABLES)
- for _ in v:
- val = sess.run(_.value())
- logging.info('variable: %30s, is_any_nan: %5s, norm: %f.', _.name,
- np.any(np.isnan(val)), np.linalg.norm(val))
-
- for i in range(iters):
- rngs = gen_rng(params, rng)
- input_data = gen_data(params, *rngs)
- input_data = pre_data(params, input_data)
- feed_dict = prepare_feed_dict(m.input_tensors, input_data)
-
- if mode == 'train':
- n_step = sess.run(global_step)
-
- if np.mod(n_step, train_display_interval) == 0:
- total_loss, np_global_step, summary, print_summary = sess.run(
- [train_op, global_step, m.summary_op[mode], m.print_summary_op[mode]],
- feed_dict=feed_dict)
- else:
- total_loss, np_global_step, summary = sess.run(
- [train_op, global_step, m.summary_op[mode]],
- feed_dict=feed_dict)
-
- if writer is not None:
- writer.add_summary(summary, np_global_step)
-
- should_stop = sess.run(m.should_stop_op)
-
- if mode == 'val':
- val = sess.run(m.agg_update_op[mode] + m.additional_return_op[mode],
- feed_dict=feed_dict)
- val_additional_ops.append(val[len(m.agg_update_op[mode]):])
-
- if mode == 'val':
- summary, print_summary, np_global_step = sess.run(
- [m.summary_op[mode], m.print_summary_op[mode], global_step])
- if writer is not None:
- writer.add_summary(summary, np_global_step)
- sess.run([m.agg_reset_op[mode]])
-
- # write custom validation ops
- if m.eval_metrics_fn[mode] is not None:
- val_metric_summary = m.eval_metrics_fn[mode](val_additional_ops,
- np_global_step, logdir)
- if writer is not None:
- writer.add_summary(val_metric_summary, np_global_step)
-
- total_loss = val_additional_ops
- should_stop = None
-
- return total_loss, should_stop
-
-def setup_training(loss_op, initial_learning_rate, steps_per_decay,
- learning_rate_decay, momentum, max_steps,
- sync=False, adjust_lr_sync=True,
- num_workers=1, replica_id=0, vars_to_optimize=None,
- clip_gradient_norm=0, typ=None, momentum2=0.999,
- adam_eps=1e-8):
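-  """Builds the learning-rate schedule, optimizer, and train op.
-
-  Returns (lr_op, global_step_op, train_op, should_stop_op, optimizer,
-  sync_optimizer); sync_optimizer is None when sync is False.
-  """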
- if sync and adjust_lr_sync:
- initial_learning_rate = initial_learning_rate * num_workers
- max_steps = np.int(max_steps / num_workers)
- steps_per_decay = np.int(steps_per_decay / num_workers)
-
- global_step_op = slim.get_or_create_global_step()
- lr_op = tf.train.exponential_decay(initial_learning_rate,
- global_step_op, steps_per_decay, learning_rate_decay, staircase=True)
- if typ == 'sgd':
- optimizer = tf.train.MomentumOptimizer(lr_op, momentum)
- elif typ == 'adam':
- optimizer = tf.train.AdamOptimizer(learning_rate=lr_op, beta1=momentum,
- beta2=momentum2, epsilon=adam_eps)
-
- if sync:
- sync_optimizer = tf.train.SyncReplicasOptimizer(optimizer,
- replicas_to_aggregate=num_workers,
- replica_id=replica_id,
- total_num_replicas=num_workers)
- train_op = slim.learning.create_train_op(loss_op, sync_optimizer,
- variables_to_train=vars_to_optimize,
- clip_gradient_norm=clip_gradient_norm)
- else:
- sync_optimizer = None
- train_op = slim.learning.create_train_op(loss_op, optimizer,
- variables_to_train=vars_to_optimize,
- clip_gradient_norm=clip_gradient_norm)
- should_stop_op = tf.greater_equal(global_step_op, max_steps)
- return lr_op, global_step_op, train_op, should_stop_op, optimizer, sync_optimizer
-
-def add_value_to_summary(metric_summary, tag, val, log=True, tag_str=None):
- """Adds a scalar summary to the summary object. Optionally also logs to
- logging."""
-  new_value = metric_summary.value.add()
- new_value.tag = tag
- new_value.simple_value = val
- if log:
- if tag_str is None:
-      tag_str = tag + ': %f'
- logging.info(tag_str, val)
-
-def add_scalar_summary_op(tensor, name=None,
- summary_key='summaries', print_summary_key='print_summaries', prefix=''):
- collections = []
- op = tf.summary.scalar(name, tensor, collections=collections)
- if summary_key != print_summary_key:
- tf.add_to_collection(summary_key, op)
-
- op = tf.Print(op, [tensor], ' {:-<25s}: '.format(name) + prefix)
- tf.add_to_collection(print_summary_key, op)
- return op
-
-def setup_inputs(inputs):
- input_tensors = {}
- input_shapes = {}
- for (name, typ, sz) in inputs:
- _ = tf.placeholder(typ, shape=sz, name=name)
- input_tensors[name] = _
- input_shapes[name] = sz
- return input_tensors, input_shapes
-
-def prepare_feed_dict(input_tensors, inputs):
- feed_dict = {}
- for n in input_tensors.keys():
- feed_dict[input_tensors[n]] = inputs[n].astype(input_tensors[n].dtype.as_numpy_dtype)
- return feed_dict
-
-def simple_add_summaries(summarize_ops, summarize_names,
- summary_key='summaries',
- print_summary_key='print_summaries', prefix=''):
-  for op, name in zip(summarize_ops, summarize_names):
- add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix)
-
- summary_op = tf.summary.merge_all(summary_key)
- print_summary_op = tf.summary.merge_all(print_summary_key)
- return summary_op, print_summary_op
-
-def add_summary_ops(m, summarize_ops, summarize_names, to_aggregate=None,
- summary_key='summaries',
- print_summary_key='print_summaries', prefix=''):
-  if not isinstance(to_aggregate, list):
- to_aggregate = [to_aggregate for _ in summarize_ops]
-
- # set up aggregating metrics
- if np.any(to_aggregate):
- agg_ops = []
- for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate):
- if to_agg:
- # agg_ops.append(slim.metrics.streaming_mean(op, return_reset_op=True))
- agg_ops.append(tf.contrib.metrics.streaming_mean(op))
- # agg_ops.append(tf.contrib.metrics.streaming_mean(op, return_reset_op=True))
- else:
- agg_ops.append([None, None, None])
-
- # agg_values_op, agg_update_op, agg_reset_op = zip(*agg_ops)
- # agg_update_op = [x for x in agg_update_op if x is not None]
- # agg_reset_op = [x for x in agg_reset_op if x is not None]
- agg_values_op, agg_update_op = zip(*agg_ops)
- agg_update_op = [x for x in agg_update_op if x is not None]
- agg_reset_op = [tf.no_op()]
- else:
- agg_values_op = [None for _ in to_aggregate]
- agg_update_op = [tf.no_op()]
- agg_reset_op = [tf.no_op()]
-
- for op, name, to_agg, agg_op in zip(summarize_ops, summarize_names, to_aggregate, agg_values_op):
- if to_agg:
- add_scalar_summary_op(agg_op, name, summary_key, print_summary_key, prefix)
- else:
- add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix)
-
- summary_op = tf.summary.merge_all(summary_key)
- print_summary_op = tf.summary.merge_all(print_summary_key)
- return summary_op, print_summary_op, agg_update_op, agg_reset_op
-
-
-
-def accum_val_ops(outputs, names, global_step, output_dir, metric_summary, N):
- """Processes the collected outputs to compute AP for action prediction.
-
- Args:
- outputs : List of scalar ops to summarize.
- names : Name of the scalar ops.
- global_step : global_step.
- output_dir : where to store results.
- metric_summary : summary object to add summaries to.
- N : number of outputs to process.
- """
- outs = []
- if N >= 0:
- outputs = outputs[:N]
- for i in range(len(outputs[0])):
-    scalar = np.array([x[i] for x in outputs])  # list comprehension; map() is lazy in Python 3
-    assert scalar.ndim == 1
- add_value_to_summary(metric_summary, names[i], np.mean(scalar),
- tag_str='{:>27s}: [{:s}]: %f'.format(names[i], ''))
- outs.append(np.mean(scalar))
- return outs
-
-def get_default_summary_ops():
- return utils.Foo(summary_ops=None, print_summary_ops=None,
- additional_return_ops=[], arop_summary_iters=[],
- arop_eval_fns=[])
-
-
-def simple_summaries(summarize_ops, summarize_names, mode, to_aggregate=False,
- scope_name='summary'):
-
-  if not isinstance(to_aggregate, list):
- to_aggregate = [to_aggregate for _ in summarize_ops]
-
- summary_key = '{:s}_summaries'.format(mode)
- print_summary_key = '{:s}_print_summaries'.format(mode)
-  prefix = ' [{:s}]: '.format(mode)
-
-  # Default ops for things that don't need to be aggregated.
- if not np.all(to_aggregate):
- for op, name, to_agg in zip(summarize_ops, summarize_names, to_aggregate):
- if not to_agg:
- add_scalar_summary_op(op, name, summary_key, print_summary_key, prefix)
- summary_ops = tf.summary.merge_all(summary_key)
- print_summary_ops = tf.summary.merge_all(print_summary_key)
- else:
- summary_ops = tf.no_op()
- print_summary_ops = tf.no_op()
-
-  # Ops for things that do need to be aggregated.
- if np.any(to_aggregate):
- additional_return_ops = [[summarize_ops[i]
-                              for i, x in enumerate(to_aggregate) if x]]
- arop_summary_iters = [-1]
- s_names = ['{:s}/{:s}'.format(scope_name, summarize_names[i])
- for i, x in enumerate(to_aggregate) if x]
- fn = lambda outputs, global_step, output_dir, metric_summary, N: \
- accum_val_ops(outputs, s_names, global_step, output_dir, metric_summary,
- N)
- arop_eval_fns = [fn]
- else:
- additional_return_ops = []
- arop_summary_iters = []
- arop_eval_fns = []
- return summary_ops, print_summary_ops, additional_return_ops, \
- arop_summary_iters, arop_eval_fns
diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/visualization_utils.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/visualization_utils.py
deleted file mode 100644
index 7a7aeb50561dba9f8713d12a184ddd824c3c0e19..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/visualization_utils.py
+++ /dev/null
@@ -1,733 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""A set of functions that are used for visualization.
-
-These functions often receive an image and perform some visualization on it.
-The functions do not return a value; instead, they modify the image in place.
-
-"""
-import collections
-import functools
-# Set headless-friendly backend.
-import matplotlib; matplotlib.use('Agg') # pylint: disable=multiple-statements
-import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
-import numpy as np
-import PIL.Image as Image
-import PIL.ImageColor as ImageColor
-import PIL.ImageDraw as ImageDraw
-import PIL.ImageFont as ImageFont
-import six
-import tensorflow as tf
-
-import standard_fields as fields
-
-
-_TITLE_LEFT_MARGIN = 10
-_TITLE_TOP_MARGIN = 10
-STANDARD_COLORS = [
- 'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
- 'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
- 'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
- 'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
- 'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
- 'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
- 'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
- 'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
- 'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
- 'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
- 'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
- 'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
- 'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
- 'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
- 'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
- 'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
- 'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
- 'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
- 'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
- 'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
- 'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
- 'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
- 'WhiteSmoke', 'Yellow', 'YellowGreen'
-]
-
-
-def save_image_array_as_png(image, output_path):
- """Saves an image (represented as a numpy array) to PNG.
-
- Args:
- image: a numpy array with shape [height, width, 3].
- output_path: path to which image should be written.
- """
- image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
- with tf.gfile.Open(output_path, 'w') as fid:
- image_pil.save(fid, 'PNG')
-
-
-def encode_image_array_as_png_str(image):
- """Encodes a numpy array into a PNG string.
-
- Args:
- image: a numpy array with shape [height, width, 3].
-
- Returns:
- PNG encoded image string.
- """
- image_pil = Image.fromarray(np.uint8(image))
- output = six.BytesIO()
- image_pil.save(output, format='PNG')
- png_string = output.getvalue()
- output.close()
- return png_string
-
-
-def draw_bounding_box_on_image_array(image,
- ymin,
- xmin,
- ymax,
- xmax,
- color='red',
- thickness=4,
- display_str_list=(),
- use_normalized_coordinates=True):
- """Adds a bounding box to an image (numpy array).
-
- Bounding box coordinates can be specified in either absolute (pixel) or
- normalized coordinates by setting the use_normalized_coordinates argument.
-
- Args:
- image: a numpy array with shape [height, width, 3].
- ymin: ymin of bounding box.
- xmin: xmin of bounding box.
- ymax: ymax of bounding box.
- xmax: xmax of bounding box.
- color: color to draw bounding box. Default is red.
- thickness: line thickness. Default value is 4.
- display_str_list: list of strings to display in box
- (each to be shown on its own line).
- use_normalized_coordinates: If True (default), treat coordinates
- ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
- coordinates as absolute.
- """
- image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
- draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
- thickness, display_str_list,
- use_normalized_coordinates)
- np.copyto(image, np.array(image_pil))
-
-
-def draw_bounding_box_on_image(image,
- ymin,
- xmin,
- ymax,
- xmax,
- color='red',
- thickness=4,
- display_str_list=(),
- use_normalized_coordinates=True):
- """Adds a bounding box to an image.
-
- Bounding box coordinates can be specified in either absolute (pixel) or
- normalized coordinates by setting the use_normalized_coordinates argument.
-
- Each string in display_str_list is displayed on a separate line above the
- bounding box in black text on a rectangle filled with the input 'color'.
- If the top of the bounding box extends to the edge of the image, the strings
- are displayed below the bounding box.
-
- Args:
- image: a PIL.Image object.
- ymin: ymin of bounding box.
- xmin: xmin of bounding box.
- ymax: ymax of bounding box.
- xmax: xmax of bounding box.
- color: color to draw bounding box. Default is red.
- thickness: line thickness. Default value is 4.
- display_str_list: list of strings to display in box
- (each to be shown on its own line).
- use_normalized_coordinates: If True (default), treat coordinates
- ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
- coordinates as absolute.
- """
- draw = ImageDraw.Draw(image)
- im_width, im_height = image.size
- if use_normalized_coordinates:
- (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
- ymin * im_height, ymax * im_height)
- else:
- (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
- draw.line([(left, top), (left, bottom), (right, bottom),
- (right, top), (left, top)], width=thickness, fill=color)
- try:
- font = ImageFont.truetype('arial.ttf', 24)
- except IOError:
- font = ImageFont.load_default()
-
- # If the total height of the display strings added to the top of the bounding
- # box exceeds the top of the image, stack the strings below the bounding box
- # instead of above.
- display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
- # Each display_str has a top and bottom margin of 0.05x.
- total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
-
- if top > total_display_str_height:
- text_bottom = top
- else:
- text_bottom = bottom + total_display_str_height
- # Reverse list and print from bottom to top.
- for display_str in display_str_list[::-1]:
- text_width, text_height = font.getsize(display_str)
- margin = np.ceil(0.05 * text_height)
- draw.rectangle(
- [(left, text_bottom - text_height - 2 * margin), (left + text_width,
- text_bottom)],
- fill=color)
- draw.text(
- (left + margin, text_bottom - text_height - margin),
- display_str,
- fill='black',
- font=font)
- text_bottom -= text_height - 2 * margin
-
-
-def draw_bounding_boxes_on_image_array(image,
- boxes,
- color='red',
- thickness=4,
- display_str_list_list=()):
- """Draws bounding boxes on image (numpy array).
-
- Args:
- image: a numpy array object.
- boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
- The coordinates are in normalized format between [0, 1].
- color: color to draw bounding box. Default is red.
- thickness: line thickness. Default value is 4.
- display_str_list_list: list of list of strings.
- a list of strings for each bounding box.
- The reason to pass a list of strings for a
- bounding box is that it might contain
- multiple labels.
-
- Raises:
- ValueError: if boxes is not a [N, 4] array
- """
- image_pil = Image.fromarray(image)
- draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
- display_str_list_list)
- np.copyto(image, np.array(image_pil))
-
-
-def draw_bounding_boxes_on_image(image,
- boxes,
- color='red',
- thickness=4,
- display_str_list_list=()):
- """Draws bounding boxes on image.
-
- Args:
- image: a PIL.Image object.
- boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax).
- The coordinates are in normalized format between [0, 1].
- color: color to draw bounding box. Default is red.
- thickness: line thickness. Default value is 4.
- display_str_list_list: list of list of strings.
- a list of strings for each bounding box.
- The reason to pass a list of strings for a
- bounding box is that it might contain
- multiple labels.
-
- Raises:
- ValueError: if boxes is not a [N, 4] array
- """
- boxes_shape = boxes.shape
- if not boxes_shape:
- return
- if len(boxes_shape) != 2 or boxes_shape[1] != 4:
- raise ValueError('Input must be of size [N, 4]')
- for i in range(boxes_shape[0]):
- display_str_list = ()
- if display_str_list_list:
- display_str_list = display_str_list_list[i]
- draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
- boxes[i, 3], color, thickness, display_str_list)
-
-
-def _visualize_boxes(image, boxes, classes, scores, category_index, **kwargs):
- return visualize_boxes_and_labels_on_image_array(
- image, boxes, classes, scores, category_index=category_index, **kwargs)
-
-
-def _visualize_boxes_and_masks(image, boxes, classes, scores, masks,
- category_index, **kwargs):
- return visualize_boxes_and_labels_on_image_array(
- image,
- boxes,
- classes,
- scores,
- category_index=category_index,
- instance_masks=masks,
- **kwargs)
-
-
-def _visualize_boxes_and_keypoints(image, boxes, classes, scores, keypoints,
- category_index, **kwargs):
- return visualize_boxes_and_labels_on_image_array(
- image,
- boxes,
- classes,
- scores,
- category_index=category_index,
- keypoints=keypoints,
- **kwargs)
-
-
-def _visualize_boxes_and_masks_and_keypoints(
- image, boxes, classes, scores, masks, keypoints, category_index, **kwargs):
- return visualize_boxes_and_labels_on_image_array(
- image,
- boxes,
- classes,
- scores,
- category_index=category_index,
- instance_masks=masks,
- keypoints=keypoints,
- **kwargs)
-
-
-def draw_bounding_boxes_on_image_tensors(images,
- boxes,
- classes,
- scores,
- category_index,
- instance_masks=None,
- keypoints=None,
- max_boxes_to_draw=20,
- min_score_thresh=0.2,
- use_normalized_coordinates=True):
- """Draws bounding boxes, masks, and keypoints on batch of image tensors.
-
- Args:
- images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
- channels will be ignored.
- boxes: [N, max_detections, 4] float32 tensor of detection boxes.
- classes: [N, max_detections] int tensor of detection classes. Note that
- classes are 1-indexed.
- scores: [N, max_detections] float32 tensor of detection scores.
- category_index: a dict that maps integer ids to category dicts. e.g.
- {1: {1: 'dog'}, 2: {2: 'cat'}, ...}
- instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
- instance masks.
- keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
- with keypoints.
- max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
- min_score_thresh: Minimum score threshold for visualization. Default 0.2.
-    use_normalized_coordinates: Whether to assume boxes and keypoints are in
-      normalized coordinates (as opposed to absolute coordinates).
- Default is True.
-
- Returns:
- 4D image tensor of type uint8, with boxes drawn on top.
- """
- # Additional channels are being ignored.
- images = images[:, :, :, 0:3]
- visualization_keyword_args = {
- 'use_normalized_coordinates': use_normalized_coordinates,
- 'max_boxes_to_draw': max_boxes_to_draw,
- 'min_score_thresh': min_score_thresh,
- 'agnostic_mode': False,
- 'line_thickness': 4
- }
-
- if instance_masks is not None and keypoints is None:
- visualize_boxes_fn = functools.partial(
- _visualize_boxes_and_masks,
- category_index=category_index,
- **visualization_keyword_args)
- elems = [images, boxes, classes, scores, instance_masks]
- elif instance_masks is None and keypoints is not None:
- visualize_boxes_fn = functools.partial(
- _visualize_boxes_and_keypoints,
- category_index=category_index,
- **visualization_keyword_args)
- elems = [images, boxes, classes, scores, keypoints]
- elif instance_masks is not None and keypoints is not None:
- visualize_boxes_fn = functools.partial(
- _visualize_boxes_and_masks_and_keypoints,
- category_index=category_index,
- **visualization_keyword_args)
- elems = [images, boxes, classes, scores, instance_masks, keypoints]
- else:
- visualize_boxes_fn = functools.partial(
- _visualize_boxes,
- category_index=category_index,
- **visualization_keyword_args)
- elems = [images, boxes, classes, scores]
-
- def draw_boxes(image_and_detections):
- """Draws boxes on image."""
- image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections,
- tf.uint8)
- return image_with_boxes
-
- images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
- return images
-
-
-def draw_side_by_side_evaluation_image(eval_dict,
- category_index,
- max_boxes_to_draw=20,
- min_score_thresh=0.2,
- use_normalized_coordinates=True):
- """Creates a side-by-side image with detections and groundtruth.
-
- Bounding boxes (and instance masks, if available) are visualized on both
- subimages.
-
- Args:
- eval_dict: The evaluation dictionary returned by
- eval_util.result_dict_for_single_example().
- category_index: A category index (dictionary) produced from a labelmap.
- max_boxes_to_draw: The maximum number of boxes to draw for detections.
- min_score_thresh: The minimum score threshold for showing detections.
-    use_normalized_coordinates: Whether to assume boxes and keypoints are in
-      normalized coordinates (as opposed to absolute coordinates).
- Default is True.
-
- Returns:
- A [1, H, 2 * W, C] uint8 tensor. The subimage on the left corresponds to
- detections, while the subimage on the right corresponds to groundtruth.
- """
- detection_fields = fields.DetectionResultFields()
- input_data_fields = fields.InputDataFields()
- instance_masks = None
- if detection_fields.detection_masks in eval_dict:
- instance_masks = tf.cast(
- tf.expand_dims(eval_dict[detection_fields.detection_masks], axis=0),
- tf.uint8)
- keypoints = None
- if detection_fields.detection_keypoints in eval_dict:
- keypoints = tf.expand_dims(
- eval_dict[detection_fields.detection_keypoints], axis=0)
- groundtruth_instance_masks = None
- if input_data_fields.groundtruth_instance_masks in eval_dict:
- groundtruth_instance_masks = tf.cast(
- tf.expand_dims(
- eval_dict[input_data_fields.groundtruth_instance_masks], axis=0),
- tf.uint8)
- images_with_detections = draw_bounding_boxes_on_image_tensors(
- eval_dict[input_data_fields.original_image],
- tf.expand_dims(eval_dict[detection_fields.detection_boxes], axis=0),
- tf.expand_dims(eval_dict[detection_fields.detection_classes], axis=0),
- tf.expand_dims(eval_dict[detection_fields.detection_scores], axis=0),
- category_index,
- instance_masks=instance_masks,
- keypoints=keypoints,
- max_boxes_to_draw=max_boxes_to_draw,
- min_score_thresh=min_score_thresh,
- use_normalized_coordinates=use_normalized_coordinates)
- images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
- eval_dict[input_data_fields.original_image],
- tf.expand_dims(eval_dict[input_data_fields.groundtruth_boxes], axis=0),
- tf.expand_dims(eval_dict[input_data_fields.groundtruth_classes], axis=0),
- tf.expand_dims(
- tf.ones_like(
- eval_dict[input_data_fields.groundtruth_classes],
- dtype=tf.float32),
- axis=0),
- category_index,
- instance_masks=groundtruth_instance_masks,
- keypoints=None,
- max_boxes_to_draw=None,
- min_score_thresh=0.0,
- use_normalized_coordinates=use_normalized_coordinates)
- return tf.concat([images_with_detections, images_with_groundtruth], axis=2)
-
-
-def draw_keypoints_on_image_array(image,
- keypoints,
- color='red',
- radius=2,
- use_normalized_coordinates=True):
- """Draws keypoints on an image (numpy array).
-
- Args:
- image: a numpy array with shape [height, width, 3].
- keypoints: a numpy array with shape [num_keypoints, 2].
- color: color to draw the keypoints with. Default is red.
- radius: keypoint radius. Default value is 2.
- use_normalized_coordinates: if True (default), treat keypoint values as
- relative to the image. Otherwise treat them as absolute.
- """
- image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
- draw_keypoints_on_image(image_pil, keypoints, color, radius,
- use_normalized_coordinates)
- np.copyto(image, np.array(image_pil))
-
-
-def draw_keypoints_on_image(image,
- keypoints,
- color='red',
- radius=2,
- use_normalized_coordinates=True):
- """Draws keypoints on an image.
-
- Args:
- image: a PIL.Image object.
- keypoints: a numpy array with shape [num_keypoints, 2].
- color: color to draw the keypoints with. Default is red.
- radius: keypoint radius. Default value is 2.
- use_normalized_coordinates: if True (default), treat keypoint values as
- relative to the image. Otherwise treat them as absolute.
- """
- draw = ImageDraw.Draw(image)
- im_width, im_height = image.size
- keypoints_x = [k[1] for k in keypoints]
- keypoints_y = [k[0] for k in keypoints]
- if use_normalized_coordinates:
- keypoints_x = tuple([im_width * x for x in keypoints_x])
- keypoints_y = tuple([im_height * y for y in keypoints_y])
- for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
- draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
- (keypoint_x + radius, keypoint_y + radius)],
- outline=color, fill=color)
-
-
-def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
- """Draws mask on an image.
-
- Args:
-    image: uint8 numpy array with shape (img_height, img_width, 3)
-    mask: a uint8 numpy array of shape (img_height, img_width) with
-      values of either 0 or 1.
- color: color to draw the keypoints with. Default is red.
- alpha: transparency value between 0 and 1. (default: 0.4)
-
- Raises:
- ValueError: On incorrect data type for image or masks.
- """
- if image.dtype != np.uint8:
- raise ValueError('`image` not of type np.uint8')
- if mask.dtype != np.uint8:
- raise ValueError('`mask` not of type np.uint8')
- if np.any(np.logical_and(mask != 1, mask != 0)):
- raise ValueError('`mask` elements should be in [0, 1]')
- if image.shape[:2] != mask.shape:
- raise ValueError('The image has spatial dimensions %s but the mask has '
- 'dimensions %s' % (image.shape[:2], mask.shape))
- rgb = ImageColor.getrgb(color)
- pil_image = Image.fromarray(image)
-
- solid_color = np.expand_dims(
- np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
- pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
- pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
- pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
- np.copyto(image, np.array(pil_image.convert('RGB')))
-
-
-def visualize_boxes_and_labels_on_image_array(
- image,
- boxes,
- classes,
- scores,
- category_index,
- instance_masks=None,
- instance_boundaries=None,
- keypoints=None,
- use_normalized_coordinates=False,
- max_boxes_to_draw=20,
- min_score_thresh=.5,
- agnostic_mode=False,
- line_thickness=4,
- groundtruth_box_visualization_color='black',
- skip_scores=False,
- skip_labels=False):
- """Overlay labeled boxes on an image with formatted scores and label names.
-
- This function groups boxes that correspond to the same location
- and creates a display string for each detection and overlays these
- on the image. Note that this function modifies the image in place, and returns
- that same image.
-
- Args:
- image: uint8 numpy array with shape (img_height, img_width, 3)
- boxes: a numpy array of shape [N, 4]
- classes: a numpy array of shape [N]. Note that class indices are 1-based,
- and match the keys in the label map.
- scores: a numpy array of shape [N] or None. If scores=None, then
- this function assumes that the boxes to be plotted are groundtruth
- boxes and plot all boxes as black with no classes or scores.
- category_index: a dict containing category dictionaries (each holding
- category index `id` and category name `name`) keyed by category indices.
- instance_masks: a numpy array of shape [N, image_height, image_width] with
- values ranging between 0 and 1, can be None.
- instance_boundaries: a numpy array of shape [N, image_height, image_width]
- with values ranging between 0 and 1, can be None.
- keypoints: a numpy array of shape [N, num_keypoints, 2], can
- be None
- use_normalized_coordinates: whether boxes is to be interpreted as
- normalized coordinates or not.
- max_boxes_to_draw: maximum number of boxes to visualize. If None, draw
- all boxes.
- min_score_thresh: minimum score threshold for a box to be visualized
- agnostic_mode: boolean (default: False) controlling whether to evaluate in
- class-agnostic mode or not. This mode will display scores but ignore
- classes.
- line_thickness: integer (default: 4) controlling line width of the boxes.
- groundtruth_box_visualization_color: box color for visualizing groundtruth
- boxes
- skip_scores: whether to skip score when drawing a single detection
- skip_labels: whether to skip label when drawing a single detection
-
- Returns:
- uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
- """
- # Create a display string (and color) for every box location, group any boxes
- # that correspond to the same location.
- box_to_display_str_map = collections.defaultdict(list)
- box_to_color_map = collections.defaultdict(str)
- box_to_instance_masks_map = {}
- box_to_instance_boundaries_map = {}
- box_to_keypoints_map = collections.defaultdict(list)
- if not max_boxes_to_draw:
- max_boxes_to_draw = boxes.shape[0]
- for i in range(min(max_boxes_to_draw, boxes.shape[0])):
- if scores is None or scores[i] > min_score_thresh:
- box = tuple(boxes[i].tolist())
- if instance_masks is not None:
- box_to_instance_masks_map[box] = instance_masks[i]
- if instance_boundaries is not None:
- box_to_instance_boundaries_map[box] = instance_boundaries[i]
- if keypoints is not None:
- box_to_keypoints_map[box].extend(keypoints[i])
- if scores is None:
- box_to_color_map[box] = groundtruth_box_visualization_color
- else:
- display_str = ''
- if not skip_labels:
- if not agnostic_mode:
- if classes[i] in category_index.keys():
- class_name = category_index[classes[i]]['name']
- else:
- class_name = 'N/A'
- display_str = str(class_name)
- if not skip_scores:
- if not display_str:
- display_str = '{}%'.format(int(100*scores[i]))
- else:
- display_str = '{}: {}%'.format(display_str, int(100*scores[i]))
- box_to_display_str_map[box].append(display_str)
- if agnostic_mode:
- box_to_color_map[box] = 'DarkOrange'
- else:
- box_to_color_map[box] = STANDARD_COLORS[
- classes[i] % len(STANDARD_COLORS)]
-
- # Draw all boxes onto image.
- for box, color in box_to_color_map.items():
- ymin, xmin, ymax, xmax = box
- if instance_masks is not None:
- draw_mask_on_image_array(
- image,
- box_to_instance_masks_map[box],
- color=color
- )
- if instance_boundaries is not None:
- draw_mask_on_image_array(
- image,
- box_to_instance_boundaries_map[box],
- color='red',
- alpha=1.0
- )
- draw_bounding_box_on_image_array(
- image,
- ymin,
- xmin,
- ymax,
- xmax,
- color=color,
- thickness=line_thickness,
- display_str_list=box_to_display_str_map[box],
- use_normalized_coordinates=use_normalized_coordinates)
- if keypoints is not None:
- draw_keypoints_on_image_array(
- image,
- box_to_keypoints_map[box],
- color=color,
- radius=line_thickness / 2,
- use_normalized_coordinates=use_normalized_coordinates)
-
- return image
-
-
-def add_cdf_image_summary(values, name):
- """Adds a tf.summary.image for a CDF plot of the values.
-
- Normalizes `values` such that they sum to 1, plots the cumulative distribution
- function and creates a tf image summary.
-
- Args:
- values: a 1-D float32 tensor containing the values.
- name: name for the image summary.
- """
- def cdf_plot(values):
- """Numpy function to plot CDF."""
- normalized_values = values / np.sum(values)
- sorted_values = np.sort(normalized_values)
- cumulative_values = np.cumsum(sorted_values)
- fraction_of_examples = (np.arange(cumulative_values.size, dtype=np.float32)
- / cumulative_values.size)
- fig = plt.figure(frameon=False)
-    ax = fig.add_subplot(1, 1, 1)
- ax.plot(fraction_of_examples, cumulative_values)
- ax.set_ylabel('cumulative normalized values')
- ax.set_xlabel('fraction of examples')
- fig.canvas.draw()
- width, height = fig.get_size_inches() * fig.get_dpi()
-    image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8').reshape(
-        1, int(height), int(width), 3)
- return image
- cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
- tf.summary.image(name, cdf_plot)
-
-
-def add_hist_image_summary(values, bins, name):
- """Adds a tf.summary.image for a histogram plot of the values.
-
- Plots the histogram of values and creates a tf image summary.
-
- Args:
- values: a 1-D float32 tensor containing the values.
- bins: bin edges which will be directly passed to np.histogram.
- name: name for the image summary.
- """
-
- def hist_plot(values, bins):
- """Numpy function to plot hist."""
- fig = plt.figure(frameon=False)
-    ax = fig.add_subplot(1, 1, 1)
- y, x = np.histogram(values, bins=bins)
- ax.plot(x[:-1], y)
- ax.set_ylabel('count')
- ax.set_xlabel('value')
- fig.canvas.draw()
- width, height = fig.get_size_inches() * fig.get_dpi()
-    image = np.frombuffer(
-        fig.canvas.tostring_rgb(), dtype='uint8').reshape(
-            1, int(height), int(width), 3)
- return image
- hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
- tf.summary.image(name, hist_plot)
diff --git a/spaces/Nattiman/chatsummarizercapstoneproject/app.py b/spaces/Nattiman/chatsummarizercapstoneproject/app.py
deleted file mode 100644
index 0746e106d930080f53ed39ca2ace6504c9cca4ee..0000000000000000000000000000000000000000
--- a/spaces/Nattiman/chatsummarizercapstoneproject/app.py
+++ /dev/null
@@ -1,107 +0,0 @@
-import json
-import pickle
-import streamlit as st
-#from streamlit_option_menu import option_menu
-import pandas as pd
-import numpy as np
-import transformers
-from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
-#from transformers import sentencepiece
-import sentencepiece as spm
-import webbrowser
-
-
-activities=["Home","Summarize","Training Dataset","Model info","About Us"]
-choice=st.sidebar.selectbox("OPTIONS",activities)
-
-
-##################################
-model = AutoModelForSeq2SeqLM.from_pretrained("SmonF/YTFineTunePegasus")
-
-#model.save_pretrained("my_model_checkpoint")
-
-tokenizer = AutoTokenizer.from_pretrained("google/pegasus-cnn_dailymail")
-
-##################################
-
-#st.markdown(hide_st_style, unsafe_allow_html=True)
-
-
-#text_input = st.text_input("Enter some text:")
-
-####with open('src/fine_tuned_model.pkl', 'rb') as f:
-#### model = pickle.load(f)
- #tokenizer = AutoTokenizer.from_pretrained("tuner007/pegasus_paraphrase")
-#### tokenizer = AutoTokenizer.from_pretrained("google/pegasus-cnn_dailymail")
-
-#model=AutoModelForSeq2SeqLM.from_pretrained('Nattiman/CHATSUMMARY')
-#tokenizer = AutoTokenizer.from_pretrained("Nattiman/CHATSUMMARY")
-
-
-
-def generate_summary(text, max_length=100, min_length=30):
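-    # Note: this builds a new pipeline (and reloads the model) on every call;
-    # hoisting the pipeline to module level would avoid the repeated setup cost.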
- summarizer = pipeline("summarization", model=model, tokenizer=tokenizer)
- summary = summarizer(text, max_length=max_length,
- min_length=min_length, do_sample=True)
-
- return summary[0]["summary_text"]
-
-
-
-
-####################################################################
-def main():
-
-
- if choice=="Home":
- #st.image("src\samsung.jpg",width=680)
- #st.image("https://drive.google.com/file/d/1Cy73Cr1CZ90E_E4iWmf_xOAWNTAKmqcj/view?usp=share_link")
- st.markdown("
SAMSUNG INNOVATION CAMPUS
", unsafe_allow_html=True)
- st.subheader("Welcome to Our Dialogue Summarizer App!")
- st.markdown(">*This is a capstone project developed by Group-6 under the supervision of SIC team*.")
- st.markdown("---")
- #txt = st.text_area('Enter your long dialogue below please')
- #txt_out = st.text_area('Output summary')
-
- elif choice=="Summarize":
- st.markdown("
Dialog Summarizing Tool
", unsafe_allow_html=True)
- input_dialogue=st.text_area("Enter Your Dialogue Below","Type here")
- if st.button("Summarize"):
- summary = generate_summary(input_dialogue)
- st.markdown("*
", unsafe_allow_html=True)
- st.header("Dataset Card for SAMSum Corpus")
- st.markdown("> *Dataset Summary\n The SAMSum dataset contains about 16k messenger-like conversations with summaries. Conversations were created and written down by linguists fluent in English. Linguists were asked to create conversations similar to those they write on a daily basis, reflecting the proportion of topics of their real-life messenger convesations. The style and register are diversified - conversations could be informal, semi-formal or formal, they may contain slang words, emoticons and typos. Then, the conversations were annotated with summaries. It was assumed that summaries should be a concise brief of what people talked about in the conversation in third person. The SAMSum dataset was prepared by Samsung R&D Institute Poland and is distributed for research purposes* (non-commercial licence: CC BY-NC-ND 4.0)",unsafe_allow_html=True)
- #st.button("Read more")
- url = 'https://huggingface.co/datasets/samsum'
-
-        # Open the SAMSum dataset card in a new tab when 'Read More' is clicked
- if st.button('Read More'):
- webbrowser.open_new_tab(url)
- elif choice=="Model info":
- st.markdown("
PEGASUS MODEL Info
", unsafe_allow_html=True)
- st.markdown(">*The Pegasus model was proposed in PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu on Dec 18, 2019. According to the abstract, Pegasus’ pretraining task is intentionally similar to summarization: important sentences are removed/masked from an input document and are generated together as one output sequence from the remaining sentences, similar to an extractive summary. Pegasus achieves SOTA summarization performance on all 12 downstream tasks, as measured by ROUGE and human eval. This model was contributed by sshleifer. The Authors’ code can be found here. Tips: Sequence-to-sequence model with the same encoder-decoder model architecture as BART. Pegasus is pre-trained jointly on two self-supervised objective functions: Masked Language Modeling (MLM) and a novel summarization specific pretraining objective, called Gap Sentence Generation (GSG). MLM: encoder input tokens are randomly replaced by a mask tokens and have to be predicted by the encoder (like in BERT) GSG: whole encoder input sentences are replaced by a second mask token and fed to the decoder, but which has a causal mask to hide the future words like a regular auto-regressive transformer decoder.*")
- url = 'https://huggingface.co/google/pegasus-cnn_dailymail'
-
-        # Open the Pegasus model card in a new tab when 'Read More' is clicked
- if st.button('Read More'):
- webbrowser.open_new_tab(url)
-
- elif choice=="About Us":
- st.markdown("
ABOUT US
", unsafe_allow_html=True)
- st.markdown("> *Welcome to our website! We are a team of passionate individuals dedicated to providing new NLP based services to our customers. Our goal is to create a positive impact in the world by leveraging our expertise and innovative solutions. With passion and resilence and through experience, we strive to exceed expectations and build lasting relationships with our clients. We, the developers of this capstone project are from Fujairah Emirate. We proudly own this project as it was the product of our hectic crash course that was offered by Samsung Innovation Campus. Thank you for choosing us, and we look forward to serving you!*")
-
- st.markdown(">*
Developers Name List
*", unsafe_allow_html=True)
- st.markdown("*
This project was developed by: Nathan Berhe, Smon Fitwi, Dawit Andebrhan, Bereket Kibreab, Eyasu Tesfamichael, Milkias Butsuamlak
*", unsafe_allow_html=True)
- st.markdown("*
This project was developed under the supervision of Mrs. Rabab, Mr.Mrad and Mr. Marc, honourable staffs of SIC program
*", unsafe_allow_html=True)
-
-
-
-
-if __name__=='__main__':
- main()
diff --git a/spaces/Nee001/bing0/tests/parse.ts b/spaces/Nee001/bing0/tests/parse.ts
deleted file mode 100644
index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000
--- a/spaces/Nee001/bing0/tests/parse.ts
+++ /dev/null
@@ -1,13 +0,0 @@
-import { promises as fs } from 'fs'
-import { join } from 'path'
-import { parseHeadersFromCurl } from '@/lib/utils'
-
-(async () => {
- const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8')
- const headers = parseHeadersFromCurl(content)
- console.log(headers)
-
- const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8')
- const cmdHeaders = parseHeadersFromCurl(cmdContent)
- console.log(cmdHeaders)
-})()
diff --git a/spaces/NingKanae/anime-voice-generator/text/__init__.py b/spaces/NingKanae/anime-voice-generator/text/__init__.py
deleted file mode 100644
index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000
--- a/spaces/NingKanae/anime-voice-generator/text/__init__.py
+++ /dev/null
@@ -1,57 +0,0 @@
-""" from https://github.com/keithito/tacotron """
-from text import cleaners
-from text.symbols import symbols
-
-
-# Mappings from symbol to numeric ID and vice versa:
-_symbol_to_id = {s: i for i, s in enumerate(symbols)}
-_id_to_symbol = {i: s for i, s in enumerate(symbols)}
-
-
-def text_to_sequence(text, symbols, cleaner_names):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-    Args:
-      text: string to convert to a sequence
-      symbols: list of symbols; IDs are assigned by position in this list
-      cleaner_names: names of the cleaner functions to run the text through
-    Returns:
-      Tuple of (list of integer IDs corresponding to the symbols, cleaned text)
- '''
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
- sequence = []
-
- clean_text = _clean_text(text, cleaner_names)
- for symbol in clean_text:
- if symbol not in _symbol_to_id.keys():
- continue
- symbol_id = _symbol_to_id[symbol]
- sequence += [symbol_id]
- return sequence, clean_text
-
-
-def cleaned_text_to_sequence(cleaned_text):
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
- Args:
-      cleaned_text: string of already-cleaned text to convert to a sequence
- Returns:
- List of integers corresponding to the symbols in the text
- '''
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
- return sequence
-
-
-def sequence_to_text(sequence):
- '''Converts a sequence of IDs back to a string'''
- result = ''
- for symbol_id in sequence:
- s = _id_to_symbol[symbol_id]
- result += s
- return result
-
-
-def _clean_text(text, cleaner_names):
- for name in cleaner_names:
- cleaner = getattr(cleaners, name)
- if not cleaner:
- raise Exception('Unknown cleaner: %s' % name)
- text = cleaner(text)
- return text
diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/Waifu2x/Readme.md b/spaces/Nyari/Super-Resolution-Anime-Diffusion/Waifu2x/Readme.md
deleted file mode 100644
index bc528c3474faeff4784aecfc44c9fd8aeac092b6..0000000000000000000000000000000000000000
--- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/Waifu2x/Readme.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# Waifu2x
-
- Re-implementation of the original [waifu2x](https://github.com/nagadomi/waifu2x) in PyTorch, with additional super resolution models. This repo is mainly used to explore interesting super resolution models. User-friendly tools may not be available now ><.
-
-## Dependencies
-* Python 3.x
-* [PyTorch](https://pytorch.org/) >= 1.0 (> 0.4.1 should also work, but is not guaranteed)
-* [Nvidia/Apex](https://github.com/NVIDIA/apex/) (used for mixed-precision training; you may use the [python codes](https://github.com/NVIDIA/apex/tree/master/apex/fp16_utils) directly)
-
-Optional: Nvidia GPU. Model inference (fp32 only) can also run on CPU alone.
-
-## What's New
-* Add [CARN Model (Fast, Accurate, and Lightweight Super-Resolution with Cascading Residual Network)](https://github.com/nmhkahn/CARN-pytorch). Model codes are adapted from the author's [github repo](https://github.com/nmhkahn/CARN-pytorch). I add [Spatial Channel Squeeze Excitation](https://arxiv.org/abs/1709.01507) and swap all 1x1 convolutions with 3x3 standard convolutions (a toy sketch of the squeeze-excitation idea follows this list). The model is trained in fp16 with Nvidia's [apex](https://github.com/NVIDIA/apex). Details and plots on model variants can be found in [docs/CARN](./docs/CARN)
-
-* Dilated convolution seems less effective (if it does not make the model worse) in super resolution, though it brings some improvement in image segmentation, especially when the dilation rate increases and then decreases. Further investigation is needed.
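-
-As a rough illustration of the channel squeeze-and-excitation idea mentioned above, here is a minimal sketch; the class name and reduction factor are assumptions, not this repo's exact block:
-
-```python
-import torch.nn as nn
-
-class SEBlock(nn.Module):
-    """Squeeze-and-excitation: reweight feature channels by a learned gate."""
-    def __init__(self, channels, reduction=16):
-        super().__init__()
-        self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze: one value per channel
-        self.gate = nn.Sequential(
-            nn.Conv2d(channels, channels // reduction, 1),
-            nn.ReLU(inplace=True),
-            nn.Conv2d(channels // reduction, channels, 1),
-            nn.Sigmoid(),  # excitation: per-channel weight in (0, 1)
-        )
-
-    def forward(self, x):
-        return x * self.gate(self.pool(x))  # broadcast the gate over H x W
-```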
-
-## How to Use
-Compare the input image and upscaled image
-```python
-from utils.prepare_images import *
-from Models import *
-from torchvision.utils import save_image
-model_cran_v2 = CARN_V2(color_channels=3, mid_channels=64, conv=nn.Conv2d,
- single_conv_size=3, single_conv_group=1,
- scale=2, activation=nn.LeakyReLU(0.1),
- SEBlock=True, repeat_blocks=3, atrous=(1, 1, 1))
-
-model_cran_v2 = network_to_half(model_cran_v2)
-checkpoint = "model_check_points/CRAN_V2/CARN_model_checkpoint.pt"
-model_cran_v2.load_state_dict(torch.load(checkpoint, 'cpu'))
-# if using a GPU, comment out the next line so the model can run in fp16.
-model_cran_v2 = model_cran_v2.float()
-
-demo_img = "input_image.png"
-img = Image.open(demo_img).convert("RGB")
-
-# origin
-img_t = to_tensor(img).unsqueeze(0)
-
-# used to compare the origin
-img = img.resize((img.size[0] // 2, img.size[1] // 2), Image.BICUBIC)
-
-# overlapping split
-# if input image is too large, then split it into overlapped patches
-# details can be found at [here](https://github.com/nagadomi/waifu2x/issues/238)
-img_splitter = ImageSplitter(seg_size=64, scale_factor=2, boarder_pad_size=3)
-img_patches = img_splitter.split_img_tensor(img, scale_method=None, img_pad=0)
-with torch.no_grad():
- out = [model_cran_v2(i) for i in img_patches]
-img_upscale = img_splitter.merge_img_tensor(out)
-
-final = torch.cat([img_t, img_upscale])
-save_image(final, 'out.png', nrow=2)
-```
-
- ## Training
-
- If possible, fp16 training is preferred because it is much faster with minimal quality decrease.
-
- A sample training script is available in `train.py`, but you may need to change some lines.
-
- ### Image Processing
- Original images are all at least 3k x 3k. I downsample them by LANCZOS so that one side has at most 2048 pixels, then I randomly cut them into 256x256 patches as targets and use 128x128 patches with JPEG noise as input images. All input patches are at least 14 KB, and they are stored in SQLite in BLOB format. SQLite seems to have [better performance](https://www.sqlite.org/intern-v-extern-blob.html) than the file system for small objects, while the H5 file format may not be optimal because of its larger size. A sketch of this patch-storage scheme is shown below.
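-
- A minimal sketch of the patch-to-SQLite scheme described above; the table name and schema are assumptions, not this repo's actual dataloader:
-
-```python
-import io
-import sqlite3
-from PIL import Image
-
-conn = sqlite3.connect('patches.db')
-conn.execute('CREATE TABLE IF NOT EXISTS patches (id INTEGER PRIMARY KEY, img BLOB)')
-
-def insert_patch(patch):
-    """Store a PIL patch as a PNG blob, keeping only patches of at least 14 KB."""
-    buf = io.BytesIO()
-    patch.save(buf, format='PNG')
-    data = buf.getvalue()
-    if len(data) >= 14 * 1024:
-        conn.execute('INSERT INTO patches (img) VALUES (?)', (sqlite3.Binary(data),))
-        conn.commit()
-
-def load_patch(row_id):
-    """Read a patch back out of the database by its row id."""
-    blob = conn.execute('SELECT img FROM patches WHERE id = ?', (row_id,)).fetchone()[0]
-    return Image.open(io.BytesIO(blob)).convert('RGB')
-```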
-
- Although convolutions can take in images of any size, the content of the image matters. For real-life images, small patches may retain variance in color, brightness, etc. within small regions, but for digitally drawn images, colors are added in block areas. A small patch may end up showing entirely one color, and the model has little to learn.
-
- For example, the following two plots come from CARN and have the same settings, including initial parameters. Both training loss and SSIM are lower for 64x64 patches, but they perform worse at test time compared to 128x128.
-
- 
- 
-
-
-Downsampling methods are uniformly chosen among ```[PIL.Image.BILINEAR, PIL.Image.BICUBIC, PIL.Image.LANCZOS]```, so different patches in the same image might be down-scaled in different ways.
-
-Image noise comes from the JPEG format only. It is added by re-encoding PNG images into PIL's JPEG data at various quality settings. Noise level 1 means quality ranges uniformly over [75, 95]; level 2 means quality ranges uniformly over [50, 75]. A sketch of this degradation pipeline follows.
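-
-A minimal sketch of the degradation pipeline described above; the function name is illustrative and a 2x downscale is assumed:
-
-```python
-import io
-import random
-from PIL import Image
-
-DOWN_FILTERS = [Image.BILINEAR, Image.BICUBIC, Image.LANCZOS]
-JPEG_QUALITY = {1: (75, 95), 2: (50, 75)}  # noise level -> quality range
-
-def degrade(patch, noise_level=1):
-    """Make a low-resolution, JPEG-noised input from a clean target patch."""
-    w, h = patch.size
-    small = patch.resize((w // 2, h // 2), random.choice(DOWN_FILTERS))
-    lo, hi = JPEG_QUALITY[noise_level]
-    buf = io.BytesIO()
-    small.save(buf, format='JPEG', quality=random.randint(lo, hi))
-    buf.seek(0)
-    return Image.open(buf).convert('RGB')
-```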
-
-
- ## Models
- Models are tuned and modified with extra features.
-
-
-* [DCSCN 12](https://github.com/jiny2001/dcscn-super-resolution)
-
-* [CRAN](https://github.com/nmhkahn/CARN-pytorch)
-
- #### From [Waifu2x](https://github.com/nagadomi/waifu2x)
- * [Upconv7](https://github.com/nagadomi/waifu2x/blob/7d156917ae1113ab847dab15c75db7642231e7fa/lib/srcnn.lua#L360)
-
- * [Vgg_7](https://github.com/nagadomi/waifu2x/blob/7d156917ae1113ab847dab15c75db7642231e7fa/lib/srcnn.lua#L334)
-
- * [Cascaded Residual U-Net with SEBlock](https://github.com/nagadomi/waifu2x/blob/7d156917ae1113ab847dab15c75db7642231e7fa/lib/srcnn.lua#L514) (PyTorch codes are not available and under testing)
-
- #### Models Comparison
- Images are from [Key: サマボケ(Summer Pocket)](http://key.visualarts.gr.jp/summer/).
-
- The left column is the original image, and the right column is bicubic, DCSCN, CRAN_V2
-
-
-
-
-
-
-
-
- ##### Scores
- The list will be updated after I add more models.
-
-Images are twitter icons (PNG) from [Key: サマボケ(Summer Pocket)](http://key.visualarts.gr.jp/summer/). They are cropped into non-overlapping 96x96 patches and down-scaled by 2. Then images are re-encoded into JPEG format with quality from [75, 95]. Scores are PSNR and MS-SSIM.
-
-| | Total Parameters | BICUBIC | Random* |
-| :---: | :---: | :---: | :---: |
-| CRAN V2| 2,149,607 | 34.0985 (0.9924) | 34.0509 (0.9922) |
-| DCSCN 12 |1,889,974 | 31.5358 (0.9851) | 31.1457 (0.9834) |
-| Upconv 7| 552,480| 31.4566 (0.9788) | 30.9492 (0.9772) |
-
-*Random: downscale methods uniformly selected from Image.BICUBIC, Image.BILINEAR, Image.LANCZOS.
-
-
-
-
-
- #### DCSCN
-[Fast and Accurate Image Super Resolution by Deep CNN with Skip Connection and Network in Network](https://github.com/jiny2001/dcscn-super-resolution#fast-and-accurate-image-super-resolution-by-deep-cnn-with-skip-connection-and-network-in-network)
-
- DCSCN is very interesting as it has relatively quick forward computation, and both the shallow model (layer 8) and the deep model (layer 12) are quick to train. The settings are different from the paper.
-
- * I use exponential decay to decrease the number of feature filters in each layer (a toy sketch follows this list). [Here](https://github.com/jiny2001/dcscn-super-resolution/blob/a868775930c6b36922897b0203468f3f1481e935/DCSCN.py#L204) is the original filter decay method.
-
- * I also increase the reconstruction filters from 48 to 128.
-
- * All activations are replaced by SELU. Dropout and weight decay are not added either, because they significantly increase the training time.
-
- * The loss function is changed from MSE to L1.
- According to [Loss Functions for Image Restoration with Neural
-Networks](https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=4&cad=rja&uact=8&ved=0ahUKEwi7kuGt_7_bAhXrqVQKHRqhCcUQFghUMAM&url=http%3A%2F%2Fresearch.nvidia.com%2Fsites%2Fdefault%2Ffiles%2Fpubs%2F2017-03_Loss-Functions-for%2Fcomparison_tci.pdf&usg=AOvVaw1p0ndOKRH2ZaEsumO7d_bA), L1 seems to be more robust and converges faster than MSE. But the authors find the results from L1 and MSE are [similar](https://github.com/jiny2001/dcscn-super-resolution/issues/29).
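-
- As a toy sketch of the exponential filter decay mentioned in the first bullet above (the endpoint filter counts and layer count are assumptions, not the trained model's exact configuration):
-
-```python
-def exp_decay_filters(first=196, last=48, layers=12):
-    """Per-layer filter counts decaying exponentially from `first` to `last`."""
-    ratio = last / first
-    return [round(first * ratio ** (i / (layers - 1))) for i in range(layers)]
-
-print(exp_decay_filters())  # 12 counts decaying smoothly from 196 down to 48
-```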
-
-
- I need to thank jiny2001 (one of the paper's authors) for testing the difference between SELU and PRELU. SELU seems more stable and has fewer parameters to train. It is a good drop-in replacement
- >layers=8, filters=96 and dataset=yang91+bsd200.
- 
- The details can be found in [here]( https://github.com/jiny2001/dcscn-super-resolution/issues/29).
-
-
-
- A pre-trained 12-layer model as well as its parameters are available. The model's run time is around 3-5 times that of Waifu2x. The output quality is usually visually indistinguishable, but its PSNR and SSIM are a bit higher. Such a comparison is not entirely fair, though, since the 12-layer model has around 1,889,974 parameters, 5 times more than waifu2x's Upconv_7 model.
-
- #### CARN
- Channels are set to 64 across all blocks, so residual adds are very effective. Increasing the channels to 128 lowers the loss curve a little but doubles the total parameters from 0.9 million to 3 million. 32 channels performs much worse. Increasing the number of cascaded blocks from 3 to 5 does not lower the loss much.
-
- SE blocks seem to give the most obvious improvement without increasing computation much. Partial-convolution-based padding seems to have little effect, if it does not decrease quality. Atrous convolution is about 10%-20% slower than normal convolution in PyTorch 1.0, with no obvious improvement.
-
-Another effective change is to add an upscaled copy of the input image to the output of the final convolution. A simple bilinear upscale seems sufficient, as in the sketch below.
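-
-A minimal sketch of that global residual connection; the class name and trunk are illustrative:
-
-```python
-import torch.nn as nn
-import torch.nn.functional as F
-
-class ResidualUpscaler(nn.Module):
-    def __init__(self, body, scale=2):
-        super().__init__()
-        self.body = body    # any conv trunk producing a 3-channel image at `scale`x
-        self.scale = scale
-
-    def forward(self, x):
-        up = F.interpolate(x, scale_factor=self.scale, mode='bilinear',
-                           align_corners=False)
-        return self.body(x) + up  # the trunk only learns the residual detail
-```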
-
-More examples on model configurations can be found in [docs/CARN folder](./docs/CARN/carn_plot_loss.md)
-
-
-
-
-
-### Waifu2x Original Models
-Models can load waifu2x's pre-trained weights. The function ```forward_checkpoint``` sets the ```nn.LeakyReLU``` to compute in place.
-
-#### Upconv_7
-The original waifu2x model. The CPU-only PyTorch implementation takes around 5 times longer for large images. The output images have very close PSNR and SSIM scores compared to images generated by the [caffe version](https://github.com/lltcggie/waifu2x-caffe), though they are not identical.
-
-#### Vgg_7
-Not tested yet, but it is ready to use.
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/roberta/wsc/wsc_criterion.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/roberta/wsc/wsc_criterion.py
deleted file mode 100644
index ed0251fdecc3573228ad271f1090aaf914b48cd1..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/roberta/wsc/wsc_criterion.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.criterions import LegacyFairseqCriterion, register_criterion
-from fairseq.data import encoders
-
-
-@register_criterion("wsc")
-class WSCCriterion(LegacyFairseqCriterion):
- def __init__(self, args, task):
- super().__init__(args, task)
- if self.args.save_predictions is not None:
- self.prediction_h = open(self.args.save_predictions, "w")
- else:
- self.prediction_h = None
- self.bpe = encoders.build_bpe(args.bpe)
- self.tokenizer = encoders.build_tokenizer(args.tokenizer)
-
- def __del__(self):
- if self.prediction_h is not None:
- self.prediction_h.close()
-
- @staticmethod
- def add_args(parser):
- """Add criterion-specific arguments to the parser."""
- parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0)
- parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0)
- parser.add_argument(
- "--wsc-cross-entropy",
- action="store_true",
- help="use cross entropy formulation instead of margin loss",
- )
- parser.add_argument(
- "--save-predictions", metavar="FILE", help="file to save predictions to"
- )
-
- def get_masked_input(self, tokens, mask):
- masked_tokens = tokens.clone()
- masked_tokens[mask] = self.task.mask
- return masked_tokens
-
- def get_lprobs(self, model, tokens, mask):
- logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
- lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
- scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
- mask = mask.type_as(scores)
- scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
- return scores
-
- def get_loss(self, query_lprobs, cand_lprobs):
- if self.args.wsc_cross_entropy:
- return F.cross_entropy(
- torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
- query_lprobs.new([0]).long(),
- )
- else:
- return (
- -query_lprobs
- + self.args.wsc_margin_alpha
- * (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0)
- ).sum()
-
- def forward(self, model, sample, reduce=True):
- # compute loss and accuracy
- loss, nloss = 0.0, 0
- ncorrect, nqueries = 0, 0
-
- for i, label in enumerate(sample["labels"]):
- query_lprobs = self.get_lprobs(
- model,
- sample["query_tokens"][i].unsqueeze(0),
- sample["query_masks"][i].unsqueeze(0),
- )
- cand_lprobs = self.get_lprobs(
- model,
- sample["candidate_tokens"][i],
- sample["candidate_masks"][i],
- )
-
- pred = (query_lprobs >= cand_lprobs).all().item()
-
- if label is not None:
- label = 1 if label else 0
- ncorrect += 1 if pred == label else 0
- nqueries += 1
-
- if label:
- # only compute a loss for positive instances
- nloss += 1
- loss += self.get_loss(query_lprobs, cand_lprobs)
-
- id = sample["id"][i].item()
- if self.prediction_h is not None:
- print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h)
-
- if nloss == 0:
- loss = torch.tensor(0.0, requires_grad=True)
-
- sample_size = nqueries if nqueries > 0 else 1
- logging_output = {
- "loss": utils.item(loss.data) if reduce else loss.data,
- "ntokens": sample["ntokens"],
- "nsentences": sample["nsentences"],
- "sample_size": sample_size,
- "ncorrect": ncorrect,
- "nqueries": nqueries,
- }
- return loss, sample_size, logging_output
-
- @staticmethod
- def aggregate_logging_outputs(logging_outputs):
- """Aggregate logging outputs from data parallel training."""
- loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
- ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
- nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
- sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
-
- agg_output = {
- "loss": loss_sum / sample_size / math.log(2),
- "ntokens": ntokens,
- "nsentences": nsentences,
- "sample_size": sample_size,
- }
-
- ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
- nqueries = sum(log.get("nqueries", 0) for log in logging_outputs)
- if nqueries > 0:
- agg_output["accuracy"] = ncorrect / float(nqueries)
-
- return agg_output
-
-
-@register_criterion("winogrande")
-class WinograndeCriterion(WSCCriterion):
- def forward(self, model, sample, reduce=True):
- # compute loss and accuracy
- query_lprobs = self.get_lprobs(
- model,
- sample["query_tokens"],
- sample["query_masks"],
- )
- cand_lprobs = self.get_lprobs(
- model,
- sample["candidate_tokens"],
- sample["candidate_masks"],
- )
- pred = query_lprobs >= cand_lprobs
- loss = self.get_loss(query_lprobs, cand_lprobs)
-
- sample_size = sample["query_tokens"].size(0)
- ncorrect = pred.sum().item()
- logging_output = {
- "loss": utils.item(loss.data) if reduce else loss.data,
- "ntokens": sample["ntokens"],
- "nsentences": sample["nsentences"],
- "sample_size": sample_size,
- "ncorrect": ncorrect,
- "nqueries": sample_size,
- }
- return loss, sample_size, logging_output
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py
deleted file mode 100644
index 56d63e3e1b5a036e0adf32480e2b66f371738013..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/criterions/label_smoothed_cross_entropy.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-from dataclasses import dataclass, field
-
-import torch
-from fairseq import metrics, utils
-from fairseq.criterions import FairseqCriterion, register_criterion
-from fairseq.dataclass import FairseqDataclass
-from omegaconf import II
-
-
-@dataclass
-class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass):
- label_smoothing: float = field(
- default=0.0,
- metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
- )
- report_accuracy: bool = field(
- default=False,
- metadata={"help": "report accuracy metric"},
- )
- ignore_prefix_size: int = field(
- default=0,
- metadata={"help": "Ignore first N tokens"},
- )
- sentence_avg: bool = II("optimization.sentence_avg")
-
-
-def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
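-    # With smoothing epsilon, the target distribution keeps (1 - epsilon) mass on
-    # the gold token and spreads epsilon uniformly over the other V - 1 vocabulary
-    # entries (eps_i = epsilon / (V - 1) each); expanding that expectation gives
-    # loss = (1 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss.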
- if target.dim() == lprobs.dim() - 1:
- target = target.unsqueeze(-1)
- nll_loss = -lprobs.gather(dim=-1, index=target)
- smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
- if ignore_index is not None:
- pad_mask = target.eq(ignore_index)
- nll_loss.masked_fill_(pad_mask, 0.0)
- smooth_loss.masked_fill_(pad_mask, 0.0)
- else:
- nll_loss = nll_loss.squeeze(-1)
- smooth_loss = smooth_loss.squeeze(-1)
- if reduce:
- nll_loss = nll_loss.sum()
- smooth_loss = smooth_loss.sum()
- eps_i = epsilon / (lprobs.size(-1) - 1)
- loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
- return loss, nll_loss
-
-
-@register_criterion(
- "label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionConfig
-)
-class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
- def __init__(
- self,
- task,
- sentence_avg,
- label_smoothing,
- ignore_prefix_size=0,
- report_accuracy=False,
- ):
- super().__init__(task)
- self.sentence_avg = sentence_avg
- self.eps = label_smoothing
- self.ignore_prefix_size = ignore_prefix_size
- self.report_accuracy = report_accuracy
-
- def forward(self, model, sample, reduce=True):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- net_output = model(**sample["net_input"])
- loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
- sample_size = (
- sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
- )
- logging_output = {
- "loss": loss.data,
- "nll_loss": nll_loss.data,
- "ntokens": sample["ntokens"],
- "nsentences": sample["target"].size(0),
- "sample_size": sample_size,
- }
- if self.report_accuracy:
- n_correct, total = self.compute_accuracy(model, net_output, sample)
- logging_output["n_correct"] = utils.item(n_correct.data)
- logging_output["total"] = utils.item(total.data)
- return loss, sample_size, logging_output
-
- def get_lprobs_and_target(self, model, net_output, sample):
- lprobs = model.get_normalized_probs(net_output, log_probs=True)
- target = model.get_targets(sample, net_output)
- if self.ignore_prefix_size > 0:
- if getattr(lprobs, "batch_first", False):
- lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
- target = target[:, self.ignore_prefix_size :].contiguous()
- else:
- lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
- target = target[self.ignore_prefix_size :, :].contiguous()
- return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
-
- def compute_loss(self, model, net_output, sample, reduce=True):
- lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
- loss, nll_loss = label_smoothed_nll_loss(
- lprobs,
- target,
- self.eps,
- ignore_index=self.padding_idx,
- reduce=reduce,
- )
- return loss, nll_loss
-
- def compute_accuracy(self, model, net_output, sample):
- lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
- mask = target.ne(self.padding_idx)
- n_correct = torch.sum(
- lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
- )
- total = torch.sum(mask)
- return n_correct, total
-
- @classmethod
- def reduce_metrics(cls, logging_outputs) -> None:
- """Aggregate logging outputs from data parallel training."""
- loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
- nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
- ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
- sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
-
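-        # losses are accumulated in nats; dividing by math.log(2) converts the
-        # reported values to bits, following fairseq's logging convention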
- metrics.log_scalar(
- "loss", loss_sum / sample_size / math.log(2), sample_size, round=3
- )
- metrics.log_scalar(
- "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3
- )
- metrics.log_derived(
- "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
- )
-
- total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
- if total > 0:
- metrics.log_scalar("total", total)
- n_correct = utils.item(
- sum(log.get("n_correct", 0) for log in logging_outputs)
- )
- metrics.log_scalar("n_correct", n_correct)
- metrics.log_derived(
- "accuracy",
- lambda meters: round(
- meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
- )
- if meters["total"].sum > 0
- else float("nan"),
- )
-
- @staticmethod
- def logging_outputs_can_be_summed() -> bool:
- """
- Whether the logging outputs returned by `forward` can be summed
- across workers prior to calling `reduce_metrics`. Setting this
-        to True will improve distributed training speed.
- """
- return True
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/text_to_speech/tacotron2.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/text_to_speech/tacotron2.py
deleted file mode 100644
index bb327e81e74900349e1357261bf2f14bc037ccd6..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/models/text_to_speech/tacotron2.py
+++ /dev/null
@@ -1,350 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel,
- FairseqIncrementalDecoder, register_model,
- register_model_architecture)
-from fairseq.modules import LSTMCellWithZoneOut, LocationAttention
-
-
-logger = logging.getLogger(__name__)
-
-
-def encoder_init(m):
- if isinstance(m, nn.Conv1d):
- nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
-
-
-class Tacotron2Encoder(FairseqEncoder):
- def __init__(self, args, src_dict, embed_speaker):
- super().__init__(src_dict)
- self.padding_idx = src_dict.pad()
- self.embed_speaker = embed_speaker
- self.spk_emb_proj = None
- if embed_speaker is not None:
- self.spk_emb_proj = nn.Linear(
- args.encoder_embed_dim + args.speaker_embed_dim,
- args.encoder_embed_dim
- )
-
- self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim,
- padding_idx=self.padding_idx)
-
- assert(args.encoder_conv_kernel_size % 2 == 1)
- self.convolutions = nn.ModuleList(
- nn.Sequential(
- nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim,
- kernel_size=args.encoder_conv_kernel_size,
- padding=((args.encoder_conv_kernel_size - 1) // 2)),
- nn.BatchNorm1d(args.encoder_embed_dim),
- nn.ReLU(),
- nn.Dropout(args.encoder_dropout)
- )
- for _ in range(args.encoder_conv_layers)
- )
-
- self.lstm = nn.LSTM(args.encoder_embed_dim, args.encoder_embed_dim // 2,
- num_layers=args.encoder_lstm_layers,
- batch_first=True, bidirectional=True)
-
- self.apply(encoder_init)
-
- def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs):
- x = self.embed_tokens(src_tokens)
- x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T
- for conv in self.convolutions:
- x = conv(x)
- x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C
-
- src_lengths = src_lengths.cpu().long()
- x = nn.utils.rnn.pack_padded_sequence(x, src_lengths, batch_first=True)
- x = self.lstm(x)[0]
- x = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)[0]
-
- encoder_padding_mask = src_tokens.eq(self.padding_idx)
-
- if self.embed_speaker is not None:
-            # x is batch-first here, so size() yields (bsz, seq_len, channels)
-            bsz, seq_len, _ = x.size()
-            emb = self.embed_speaker(speaker).expand(bsz, seq_len, -1)
- x = self.spk_emb_proj(torch.cat([x, emb], dim=2))
-
- return {
- "encoder_out": [x], # B x T x C
- "encoder_padding_mask": encoder_padding_mask, # B x T
- }
-
-
-class Prenet(nn.Module):
- def __init__(self, in_dim, n_layers, n_units, dropout):
- super().__init__()
- self.layers = nn.ModuleList(
- nn.Sequential(nn.Linear(in_dim if i == 0 else n_units, n_units),
- nn.ReLU())
- for i in range(n_layers)
- )
- self.dropout = dropout
-
- def forward(self, x):
- for layer in self.layers:
-            x = F.dropout(layer(x), p=self.dropout)  # always applies dropout, even at inference, as in the Tacotron 2 paper
- return x
-
-
-class Postnet(nn.Module):
- def __init__(self, in_dim, n_channels, kernel_size, n_layers, dropout):
- super(Postnet, self).__init__()
- self.convolutions = nn.ModuleList()
- assert(kernel_size % 2 == 1)
- for i in range(n_layers):
- cur_layers = [
- nn.Conv1d(in_dim if i == 0 else n_channels,
- n_channels if i < n_layers - 1 else in_dim,
- kernel_size=kernel_size,
- padding=((kernel_size - 1) // 2)),
- nn.BatchNorm1d(n_channels if i < n_layers - 1 else in_dim)
- ] + ([nn.Tanh()] if i < n_layers - 1 else []) + [nn.Dropout(dropout)]
- nn.init.xavier_uniform_(
- cur_layers[0].weight,
- torch.nn.init.calculate_gain(
- "tanh" if i < n_layers - 1 else "linear"
- )
- )
- self.convolutions.append(nn.Sequential(*cur_layers))
-
- def forward(self, x):
- x = x.transpose(1, 2) # B x T x C -> B x C x T
- for conv in self.convolutions:
- x = conv(x)
- return x.transpose(1, 2)
-
-
-def decoder_init(m):
- if isinstance(m, torch.nn.Conv1d):
- nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
-
-
-class Tacotron2Decoder(FairseqIncrementalDecoder):
- def __init__(self, args, src_dict):
- super().__init__(None)
- self.args = args
- self.n_frames_per_step = args.n_frames_per_step
- self.out_dim = args.output_frame_dim * args.n_frames_per_step
-
- self.prenet = Prenet(self.out_dim, args.prenet_layers, args.prenet_dim,
- args.prenet_dropout)
-
- # take prev_context, prev_frame, (speaker embedding) as input
- self.attention_lstm = LSTMCellWithZoneOut(
- args.zoneout,
- args.prenet_dim + args.encoder_embed_dim,
- args.decoder_lstm_dim
- )
-
- # take attention_lstm output, attention_state, encoder_out as input
- self.attention = LocationAttention(
- args.attention_dim, args.encoder_embed_dim, args.decoder_lstm_dim,
- (1 + int(args.attention_use_cumprob)),
- args.attention_conv_dim, args.attention_conv_kernel_size
- )
-
- # take attention_lstm output, context, (gated_latent) as input
- self.lstm = nn.ModuleList(
- LSTMCellWithZoneOut(
- args.zoneout,
- args.encoder_embed_dim + args.decoder_lstm_dim,
- args.decoder_lstm_dim
- )
- for i in range(args.decoder_lstm_layers)
- )
-
- proj_in_dim = args.encoder_embed_dim + args.decoder_lstm_dim
- self.feat_proj = nn.Linear(proj_in_dim, self.out_dim)
- self.eos_proj = nn.Linear(proj_in_dim, 1)
-
- self.postnet = Postnet(self.out_dim, args.postnet_conv_dim,
- args.postnet_conv_kernel_size,
- args.postnet_layers, args.postnet_dropout)
-
- self.ctc_proj = None
- if getattr(args, "ctc_weight", 0.) > 0.:
- self.ctc_proj = nn.Linear(self.out_dim, len(src_dict))
-
- self.apply(decoder_init)
-
- def _get_states(self, incremental_state, enc_out):
- bsz, in_len, _ = enc_out.size()
- alstm_h = self.get_incremental_state(incremental_state, "alstm_h")
- if alstm_h is None:
- alstm_h = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
- alstm_c = self.get_incremental_state(incremental_state, "alstm_c")
- if alstm_c is None:
- alstm_c = enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
-
- lstm_h = self.get_incremental_state(incremental_state, "lstm_h")
- if lstm_h is None:
- lstm_h = [enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
- for _ in range(self.args.decoder_lstm_layers)]
- lstm_c = self.get_incremental_state(incremental_state, "lstm_c")
- if lstm_c is None:
- lstm_c = [enc_out.new_zeros(bsz, self.args.decoder_lstm_dim)
- for _ in range(self.args.decoder_lstm_layers)]
-
- attn_w = self.get_incremental_state(incremental_state, "attn_w")
- if attn_w is None:
- attn_w = enc_out.new_zeros(bsz, in_len)
- attn_w_cum = self.get_incremental_state(incremental_state, "attn_w_cum")
- if attn_w_cum is None:
- attn_w_cum = enc_out.new_zeros(bsz, in_len)
- return alstm_h, alstm_c, lstm_h, lstm_c, attn_w, attn_w_cum
-
- def _get_init_attn_c(self, enc_out, enc_mask):
- bsz = enc_out.size(0)
- if self.args.init_attn_c == "zero":
- return enc_out.new_zeros(bsz, self.args.encoder_embed_dim)
- elif self.args.init_attn_c == "avg":
- enc_w = (~enc_mask).type(enc_out.type())
- enc_w = enc_w / enc_w.sum(dim=1, keepdim=True)
- return torch.sum(enc_out * enc_w.unsqueeze(2), dim=1)
- else:
- raise ValueError(f"{self.args.init_attn_c} not supported")
-
- def forward(self, prev_output_tokens, encoder_out=None,
- incremental_state=None, target_lengths=None, **kwargs):
- enc_mask = encoder_out["encoder_padding_mask"]
- enc_out = encoder_out["encoder_out"][0]
- in_len = enc_out.size(1)
-
- if incremental_state is not None:
- prev_output_tokens = prev_output_tokens[:, -1:, :]
- bsz, out_len, _ = prev_output_tokens.size()
-
- prenet_out = self.prenet(prev_output_tokens)
- (alstm_h, alstm_c, lstm_h, lstm_c,
- attn_w, attn_w_cum) = self._get_states(incremental_state, enc_out)
- attn_ctx = self._get_init_attn_c(enc_out, enc_mask)
-
- attn_out = enc_out.new_zeros(bsz, in_len, out_len)
- feat_out = enc_out.new_zeros(bsz, out_len, self.out_dim)
- eos_out = enc_out.new_zeros(bsz, out_len)
- for t in range(out_len):
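-            # one decoder step: attention LSTM -> location-sensitive attention
-            # -> stacked decoder LSTMs -> frame and EOS projections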
- alstm_in = torch.cat((attn_ctx, prenet_out[:, t, :]), dim=1)
- alstm_h, alstm_c = self.attention_lstm(alstm_in, (alstm_h, alstm_c))
-
- attn_state = attn_w.unsqueeze(1)
- if self.args.attention_use_cumprob:
- attn_state = torch.stack((attn_w, attn_w_cum), dim=1)
- attn_ctx, attn_w = self.attention(
- enc_out, enc_mask, alstm_h, attn_state
- )
- attn_w_cum = attn_w_cum + attn_w
- attn_out[:, :, t] = attn_w
-
- for i, cur_lstm in enumerate(self.lstm):
- if i == 0:
- lstm_in = torch.cat((attn_ctx, alstm_h), dim=1)
- else:
- lstm_in = torch.cat((attn_ctx, lstm_h[i - 1]), dim=1)
- lstm_h[i], lstm_c[i] = cur_lstm(lstm_in, (lstm_h[i], lstm_c[i]))
-
- proj_in = torch.cat((attn_ctx, lstm_h[-1]), dim=1)
- feat_out[:, t, :] = self.feat_proj(proj_in)
- eos_out[:, t] = self.eos_proj(proj_in).squeeze(1)
- self.attention.clear_cache()
-
- self.set_incremental_state(incremental_state, "alstm_h", alstm_h)
- self.set_incremental_state(incremental_state, "alstm_c", alstm_c)
- self.set_incremental_state(incremental_state, "lstm_h", lstm_h)
- self.set_incremental_state(incremental_state, "lstm_c", lstm_c)
- self.set_incremental_state(incremental_state, "attn_w", attn_w)
- self.set_incremental_state(incremental_state, "attn_w_cum", attn_w_cum)
-
- post_feat_out = feat_out + self.postnet(feat_out)
- eos_out = eos_out.view(bsz, out_len, 1)
- return post_feat_out, eos_out, {"attn": attn_out, "feature_out": feat_out}
-
-
-@register_model("tacotron_2")
-class Tacotron2Model(FairseqEncoderDecoderModel):
- """
- Implementation for https://arxiv.org/pdf/1712.05884.pdf
- """
-
- @staticmethod
- def add_args(parser):
- # encoder
- parser.add_argument("--encoder-dropout", type=float)
- parser.add_argument("--encoder-embed-dim", type=int)
- parser.add_argument("--encoder-conv-layers", type=int)
- parser.add_argument("--encoder-conv-kernel-size", type=int)
- parser.add_argument("--encoder-lstm-layers", type=int)
- # decoder
- parser.add_argument("--attention-dim", type=int)
- parser.add_argument("--attention-conv-dim", type=int)
- parser.add_argument("--attention-conv-kernel-size", type=int)
- parser.add_argument("--prenet-dropout", type=float)
- parser.add_argument("--prenet-layers", type=int)
- parser.add_argument("--prenet-dim", type=int)
- parser.add_argument("--postnet-dropout", type=float)
- parser.add_argument("--postnet-layers", type=int)
- parser.add_argument("--postnet-conv-dim", type=int)
- parser.add_argument("--postnet-conv-kernel-size", type=int)
- parser.add_argument("--init-attn-c", type=str)
- parser.add_argument("--attention-use-cumprob", action='store_true')
- parser.add_argument("--zoneout", type=float)
- parser.add_argument("--decoder-lstm-layers", type=int)
- parser.add_argument("--decoder-lstm-dim", type=int)
- parser.add_argument("--output-frame-dim", type=int)
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self._num_updates = 0
-
- @classmethod
- def build_model(cls, args, task):
- embed_speaker = task.get_speaker_embeddings(args)
- encoder = Tacotron2Encoder(args, task.src_dict, embed_speaker)
- decoder = Tacotron2Decoder(args, task.src_dict)
- return cls(encoder, decoder)
-
- def forward_encoder(self, src_tokens, src_lengths, **kwargs):
- return self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
-
- def set_num_updates(self, num_updates):
- super().set_num_updates(num_updates)
- self._num_updates = num_updates
-
-
-@register_model_architecture("tacotron_2", "tacotron_2")
-def base_architecture(args):
- # encoder
- args.encoder_dropout = getattr(args, "encoder_dropout", 0.5)
- args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
- args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3)
- args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5)
- args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1)
- # decoder
- args.attention_dim = getattr(args, "attention_dim", 128)
- args.attention_conv_dim = getattr(args, "attention_conv_dim", 32)
- args.attention_conv_kernel_size = getattr(args,
- "attention_conv_kernel_size", 15)
- args.prenet_dropout = getattr(args, "prenet_dropout", 0.5)
- args.prenet_layers = getattr(args, "prenet_layers", 2)
- args.prenet_dim = getattr(args, "prenet_dim", 256)
- args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
- args.postnet_layers = getattr(args, "postnet_layers", 5)
- args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
- args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
- args.init_attn_c = getattr(args, "init_attn_c", "zero")
- args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True)
- args.zoneout = getattr(args, "zoneout", 0.1)
- args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2)
- args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024)
- args.output_frame_dim = getattr(args, "output_frame_dim", 80)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/linformer/linformer_src/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/linformer/linformer_src/__init__.py
deleted file mode 100644
index 1c52f135ea6f99d0effe8ce1f7d77cbd66be3745..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/linformer/linformer_src/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .models import linformer_roberta # noqa
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py
deleted file mode 100644
index 7e2caa03400129ac0bb34ae35274cdf46f27a055..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/linformer/linformer_src/modules/linformer_sentence_encoder_layer.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-from fairseq import utils
-from fairseq.modules import TransformerEncoderLayer
-
-from .multihead_linear_attention import MultiheadLinearAttention
-
-
-class LinformerTransformerEncoderLayer(TransformerEncoderLayer):
- """
- Implements a Linformer Encoder Layer used in BERT/XLM style pre-trained
- models.
- """
-
- def __init__(self, args, shared_compress_layer):
- # wrap in a list so it's not automatically registered by PyTorch
- self.shared_compress_layer = [shared_compress_layer]
-
- super().__init__(args)
-
- self.register_buffer("version", torch.tensor(2))
-
- def build_self_attention(self, embed_dim, args):
- return MultiheadLinearAttention(
- embed_dim,
- args.encoder_attention_heads,
- dropout=args.dropout,
- self_attention=True,
- q_noise=args.quant_noise_pq,
- qn_block_size=args.quant_noise_pq_block_size,
- compressed=args.compressed,
- max_seq_len=args.max_positions,
- shared_kv_compressed=args.shared_kv_compressed,
- shared_compress_layer=self.shared_compress_layer[0],
- freeze_compress=args.freeze_compress,
- )
-
- def upgrade_state_dict_named(self, state_dict, name):
- super().upgrade_state_dict_named(state_dict, name)
- prefix = name + "." if name != "" else ""
-
- # some old checkpoints had weight sharing implemented incorrectly
- # (note: this was correct in the original paper code)
- if utils.item(state_dict.get(f"{prefix}version", torch.tensor(1))) < 2:
- state_dict[f"{prefix}version"] = torch.tensor(1)
- # check compression layer sharing
- if f"{prefix}shared_compress_layer.weight" in state_dict:
- # reinitialize block without sharing compression layer to match
- # old behavior
- self.shared_compress_layer = [
- torch.nn.Linear(
- self.shared_compress_layer[0].weight.size(1),
- self.shared_compress_layer[0].weight.size(0),
- )
- ]
- self.self_attn = self.build_self_attention(self.embed_dim, self.args)
- # delete shared_compress_layer, since it's already copied to
- # self_attn.compress_k.weight
- del state_dict[f"{prefix}shared_compress_layer.weight"]
- if f"{prefix}shared_compress_layer.bias" in state_dict:
- del state_dict[f"{prefix}shared_compress_layer.bias"]
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/shuffled_word_order/README.finetuning.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/shuffled_word_order/README.finetuning.md
deleted file mode 100644
index ecbcb65884640c3327a2cbaef8aad4f3cfe812f7..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/shuffled_word_order/README.finetuning.md
+++ /dev/null
@@ -1,135 +0,0 @@
-# Fine-tuning details
-
-For each task (GLUE and PAWS), we perform a hyperparameter search for each model and report the mean and standard deviation of the best configuration across 5 seeds. First, get the datasets following the instructions in the [RoBERTa fine-tuning README](../roberta/README.glue.md). Alternatively, you can use [huggingface datasets](https://huggingface.co/docs/datasets/) to get the task data:
-
-```python
-from datasets import load_dataset
-import pandas as pd
-from pathlib import Path
-
-key2file = {
-"paws": {
- "loc": "paws_data",
- "columns": ["id", "sentence1", "sentence2", "label"],
- "train": "train.tsv",
- "validation": "dev.tsv",
- "test": "test.tsv"
- }
-}
-
-task_data = load_dataset("paws", "labeled_final")
-task_config = key2file["paws"]
-save_path = Path(task_config["loc"])
-save_path.mkdir(exist_ok=True, parents=True)
-for key, fl in task_config.items():
- if key in ["loc", "columns"]:
- continue
- print(f"Reading {key}")
- columns = task_config["columns"]
- df = pd.DataFrame(task_data[key])
- print(df.columns)
- df = df[columns]
- print(f"Got {len(df)} records")
- save_loc = save_path / fl
- print(f"Saving to : {save_loc}")
- df.to_csv(save_loc, sep="\t", header=None, index=None)
-
-```
-
-- Preprocess using the RoBERTa GLUE preprocessing script, keeping in mind the column numbers for `sentence1`, `sentence2` and `label` (which are 0, 1, and 2 if you save the data according to the above example).
-- Then, fine-tune similarly to RoBERTa (for example, in the case of RTE):
-
-```bash
-TOTAL_NUM_UPDATES=30875 # 10 epochs through RTE for bsz 16
-WARMUP_UPDATES=1852 # 6 percent of the number of updates
-LR=2e-05 # Peak LR for polynomial LR scheduler.
-NUM_CLASSES=2
-MAX_SENTENCES=16 # Batch size.
-SHUFFLED_ROBERTA_PATH=/path/to/shuffled_roberta/model.pt
-
-CUDA_VISIBLE_DEVICES=0 fairseq-train RTE-bin/ \
- --restore-file $SHUFFLED_ROBERTA_PATH \
- --max-positions 512 \
- --batch-size $MAX_SENTENCES \
- --max-tokens 4400 \
- --task sentence_prediction \
- --reset-optimizer --reset-dataloader --reset-meters \
- --required-batch-size-multiple 1 \
- --init-token 0 --separator-token 2 \
- --arch roberta_large \
- --criterion sentence_prediction \
- --num-classes $NUM_CLASSES \
- --dropout 0.1 --attention-dropout 0.1 \
- --weight-decay 0.1 --optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \
- --clip-norm 0.0 \
- --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
- --fp16 --fp16-init-scale 4 --threshold-loss-scale 1 --fp16-scale-window 128 \
- --max-epoch 10 \
- --find-unused-parameters \
- --best-checkpoint-metric accuracy --maximize-best-checkpoint-metric;
-```
-
-- `TOTAL_NUM_UPDATES` is computed from the `--batch_size` value and the dataset size.
-- `WARMUP_UPDATES` is computed as 6% of `TOTAL_NUM_UPDATES` (see the sketch below).
-- The best hyperparameters for `--lr` and `--batch_size` are reported in the tables below:
-
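-A minimal sketch of the warmup arithmetic, applying the 6% rule to the RTE numbers from the script above (rounding down via `int` is an assumption):
-
-```python
-TOTAL_NUM_UPDATES = 30875  # from the RTE script above
-WARMUP_UPDATES = int(0.06 * TOTAL_NUM_UPDATES)
-print(WARMUP_UPDATES)  # 1852, matching the script
-```
-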
-## `--lr`
-
-| | name | RTE | MRPC | SST-2 | CoLA | QQP | QNLI | MNLI | PAWS |
-| --: | :----------- | ----: | ----: | ----: | ----: | ----: | ----: | ----: | ----: |
-| 0 | original | 2e-05 | 2e-05 | 1e-05 | 2e-05 | 1e-05 | 1e-05 | 1e-05 | 2e-05 |
-| 1 | n_1 | 2e-05 | 1e-05 | 1e-05 | 1e-05 | 3e-05 | 1e-05 | 2e-05 | 2e-05 |
-| 2 | n_2 | 2e-05 | 2e-05 | 1e-05 | 1e-05 | 2e-05 | 1e-05 | 1e-05 | 3e-05 |
-| 3 | n_3 | 3e-05 | 1e-05 | 2e-05 | 2e-05 | 3e-05 | 1e-05 | 1e-05 | 2e-05 |
-| 4 | n_4 | 3e-05 | 1e-05 | 2e-05 | 2e-05 | 2e-05 | 1e-05 | 1e-05 | 2e-05 |
-| 5 | r512 | 1e-05 | 3e-05 | 2e-05 | 2e-05 | 3e-05 | 2e-05 | 3e-05 | 2e-05 |
-| 6 | rand_corpus | 2e-05 | 1e-05 | 3e-05 | 1e-05 | 3e-05 | 3e-05 | 3e-05 | 2e-05 |
-| 7 | rand_uniform | 2e-05 | 1e-05 | 3e-05 | 2e-05 | 3e-05 | 3e-05 | 3e-05 | 1e-05 |
-| 8 | rand_init | 1e-05 | 1e-05 | 3e-05 | 1e-05 | 1e-05 | 1e-05 | 2e-05 | 1e-05 |
-| 9 | no_pos | 1e-05 | 3e-05 | 2e-05 | 1e-05 | 1e-05 | 1e-05 | 1e-05 | 1e-05 |
-
-## `--batch_size`
-
-| | name | RTE | MRPC | SST-2 | CoLA | QQP | QNLI | MNLI | PAWS |
-| --: | :----------- | --: | ---: | ----: | ---: | --: | ---: | ---: | ---: |
-| 0 | orig | 16 | 16 | 32 | 16 | 16 | 32 | 32 | 16 |
-| 1 | n_1 | 32 | 32 | 16 | 32 | 32 | 16 | 32 | 16 |
-| 2 | n_2 | 32 | 16 | 32 | 16 | 32 | 32 | 16 | 32 |
-| 3 | n_3 | 32 | 32 | 16 | 32 | 32 | 16 | 32 | 32 |
-| 4 | n_4 | 32 | 16 | 32 | 16 | 32 | 32 | 32 | 32 |
-| 5 | r512 | 32 | 16 | 16 | 32 | 32 | 16 | 16 | 16 |
-| 6 | rand_corpus | 16 | 16 | 16 | 16 | 32 | 16 | 16 | 32 |
-| 7 | rand_uniform | 16 | 32 | 16 | 16 | 32 | 16 | 16 | 16 |
-| 8 | rand_init | 16 | 16 | 32 | 16 | 16 | 16 | 32 | 16 |
-| 9 | no_pos | 16 | 32 | 16 | 16 | 32 | 16 | 16 | 16 |
-
-- Perform inference similar to RoBERTa as well:
-
-```python
-from fairseq.models.roberta import RobertaModel
-
-roberta = RobertaModel.from_pretrained(
- 'checkpoints/',
- checkpoint_file='checkpoint_best.pt',
- data_name_or_path='PAWS-bin'
-)
-
-label_fn = lambda label: roberta.task.label_dictionary.string(
- [label + roberta.task.label_dictionary.nspecial]
-)
-ncorrect, nsamples = 0, 0
-roberta.cuda()
-roberta.eval()
-with open('paws_data/dev.tsv') as fin:
- fin.readline()
- for index, line in enumerate(fin):
- tokens = line.strip().split('\t')
- sent1, sent2, target = tokens[0], tokens[1], tokens[2]
- tokens = roberta.encode(sent1, sent2)
- prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
- prediction_label = label_fn(prediction)
- ncorrect += int(prediction_label == target)
- nsamples += 1
-print('| Accuracy: ', float(ncorrect)/float(nsamples))
-
-```
diff --git a/spaces/OFA-Sys/OFA-vqa/utils/cider/pyciderevalcap/cider/cider.py b/spaces/OFA-Sys/OFA-vqa/utils/cider/pyciderevalcap/cider/cider.py
deleted file mode 100644
index 5b65978370cb82dd2111500e7f05c4d05306162c..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/utils/cider/pyciderevalcap/cider/cider.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Filename: cider.py
-#
-#
-# Description: Describes the class to compute the CIDEr
-# (Consensus-Based Image Description Evaluation) Metric
-# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
-#
-# Creation Date: Sun Feb 8 14:16:54 2015
-#
-# Authors: Ramakrishna Vedantam and
-# Tsung-Yi Lin
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from .cider_scorer import CiderScorer
-
-
-class Cider:
- """
- Main Class to compute the CIDEr metric
-
- """
- def __init__(self, n=4, df="corpus"):
- """
- Initialize the CIDEr scoring function
- : param n (int): n-gram size
- : param df (string): specifies where to get the IDF values from
-        : param df (string): specifies where to get the IDF values from;
-        takes values 'corpus', 'coco-train'
- """
- # set cider to sum over 1 to 4-grams
- self._n = n
- self._df = df
- self.cider_scorer = CiderScorer(n=self._n, df_mode=self._df)
-
- def compute_score(self, gts, res):
- """
- Main function to compute CIDEr score
-        : param gts (dict) : {image_id: list of tokenized reference sentences}
-        : param res (list) : [{"image_id": ..., "caption": [tokenized candidate sentence]}, ...]
- : return: cider (float) : computed CIDEr score for the corpus
- """
-
- # clear all the previous hypos and refs
- self.cider_scorer.clear()
-
- for res_id in res:
-
- hypo = res_id['caption']
- ref = gts[res_id['image_id']]
-
- # Sanity check.
- assert(type(hypo) is list)
- assert(len(hypo) == 1)
- assert(type(ref) is list)
- assert(len(ref) > 0)
- self.cider_scorer += (hypo[0], ref)
-
- (score, scores) = self.cider_scorer.compute_score()
-
- return score, scores
-
- def method(self):
- return "CIDEr"
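-
-# A minimal usage sketch (the inputs below are illustrative, and assume
-# CiderScorer is importable from .cider_scorer):
-#
-#   scorer = Cider(n=4, df="corpus")
-#   gts = {"img1": ["a cat sits on a mat", "there is a cat on a mat"]}
-#   res = [{"image_id": "img1", "caption": ["a cat on a mat"]}]
-#   score, scores = scorer.compute_score(gts, res)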
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/shared.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/shared.py
deleted file mode 100644
index 2d0f7bf3999064a68f28a1207d65a2de7ae98c0a..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/export/shared.py
+++ /dev/null
@@ -1,1034 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import collections
-import contextlib
-import copy
-import functools
-import logging
-import numpy as np
-import os
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-from unittest import mock
-import caffe2.python.utils as putils
-import torch
-import torch.nn.functional as F
-from caffe2.proto import caffe2_pb2
-from caffe2.python import core, net_drawer, workspace
-from torch.nn.functional import interpolate as interp
-
-logger = logging.getLogger(__name__)
-
-
-# ==== torch/utils_toffee/cast.py =======================================
-
-
-def to_device(t, device_str):
- """
-    This function is a replacement for .to(another_device) that allows the
-    cast to be traced properly by explicitly calling the underlying copy ops.
-    It also avoids introducing an unnecessary op when casting to the same device.
- """
- src = t.device
- dst = torch.device(device_str)
-
- if src == dst:
- return t
- elif src.type == "cuda" and dst.type == "cpu":
- return torch.ops._caffe2.CopyGPUToCPU(t)
- elif src.type == "cpu" and dst.type == "cuda":
- return torch.ops._caffe2.CopyCPUToGPU(t)
- else:
- raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst))
-
-
-# ==== torch/utils_toffee/interpolate.py =======================================
-
-
-# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
-def BilinearInterpolation(tensor_in, up_scale):
- assert up_scale % 2 == 0, "Scale should be even"
-
- def upsample_filt(size):
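-        # standard separable bilinear kernel: weights fall off linearly with
-        # distance from the kernel center (outer product of two triangle filters)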
- factor = (size + 1) // 2
- if size % 2 == 1:
- center = factor - 1
- else:
- center = factor - 0.5
-
- og = np.ogrid[:size, :size]
- return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
-
- kernel_size = int(up_scale) * 2
- bil_filt = upsample_filt(kernel_size)
-
- dim = int(tensor_in.shape[1])
- kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
- kernel[range(dim), range(dim), :, :] = bil_filt
-
- tensor_out = F.conv_transpose2d(
- tensor_in,
- weight=to_device(torch.Tensor(kernel), tensor_in.device),
- bias=None,
- stride=int(up_scale),
- padding=int(up_scale / 2),
- )
-
- return tensor_out
-
-
-# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
-# using dynamic `scale_factor` rather than static `size`. (T43166860)
-# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
-def onnx_compatibale_interpolate(
- input, size=None, scale_factor=None, mode="nearest", align_corners=None
-):
- # NOTE: The input dimensions are interpreted in the form:
- # `mini-batch x channels x [optional depth] x [optional height] x width`.
- if size is None and scale_factor is not None:
- if input.dim() == 4:
- if isinstance(scale_factor, (int, float)):
- height_scale, width_scale = (scale_factor, scale_factor)
- else:
- assert isinstance(scale_factor, (tuple, list))
- assert len(scale_factor) == 2
- height_scale, width_scale = scale_factor
-
- assert not align_corners, "No matching C2 op for align_corners == True"
- if mode == "nearest":
- return torch.ops._caffe2.ResizeNearest(
- input, order="NCHW", width_scale=width_scale, height_scale=height_scale
- )
- elif mode == "bilinear":
- logger.warning(
- "Use F.conv_transpose2d for bilinear interpolate"
- " because there's no such C2 op, this may cause significant"
-                    " slowdown and the boundary pixels won't be the same as"
-                    " with F.interpolate due to padding."
- )
- assert height_scale == width_scale
- return BilinearInterpolation(input, up_scale=height_scale)
-    logger.warning("Output size is not static, which might cause ONNX conversion issues")
-
- return interp(input, size, scale_factor, mode, align_corners)
-
-
-@contextlib.contextmanager
-def mock_torch_nn_functional_interpolate():
- if torch.onnx.is_in_onnx_export():
- with mock.patch(
- "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
- ):
- yield
- else:
- yield
-
-
-# ==== torch/utils_caffe2/ws_utils.py ==========================================
-
-
-class ScopedWS(object):
- def __init__(self, ws_name, is_reset, is_cleanup=False):
- self.ws_name = ws_name
- self.is_reset = is_reset
- self.is_cleanup = is_cleanup
- self.org_ws = ""
-
- def __enter__(self):
- self.org_ws = workspace.CurrentWorkspace()
- if self.ws_name is not None:
- workspace.SwitchWorkspace(self.ws_name, True)
- if self.is_reset:
- workspace.ResetWorkspace()
-
- return workspace
-
- def __exit__(self, *args):
- if self.is_cleanup:
- workspace.ResetWorkspace()
- if self.ws_name is not None:
- workspace.SwitchWorkspace(self.org_ws)
-
-
-def fetch_any_blob(name):
- bb = None
- try:
- bb = workspace.FetchBlob(name)
- except TypeError:
- bb = workspace.FetchInt8Blob(name)
- except Exception as e:
- logger.error("Get blob {} error: {}".format(name, e))
-
- return bb
-
-
-# ==== torch/utils_caffe2/protobuf.py ==========================================
-
-
-def get_pb_arg(pb, arg_name):
- for x in pb.arg:
- if x.name == arg_name:
- return x
- return None
-
-
-def get_pb_arg_valf(pb, arg_name, default_val):
- arg = get_pb_arg(pb, arg_name)
- return arg.f if arg is not None else default_val
-
-
-def get_pb_arg_floats(pb, arg_name, default_val):
- arg = get_pb_arg(pb, arg_name)
- return list(map(float, arg.floats)) if arg is not None else default_val
-
-
-def get_pb_arg_ints(pb, arg_name, default_val):
- arg = get_pb_arg(pb, arg_name)
- return list(map(int, arg.ints)) if arg is not None else default_val
-
-
-def get_pb_arg_vali(pb, arg_name, default_val):
- arg = get_pb_arg(pb, arg_name)
- return arg.i if arg is not None else default_val
-
-
-def get_pb_arg_vals(pb, arg_name, default_val):
- arg = get_pb_arg(pb, arg_name)
- return arg.s if arg is not None else default_val
-
-
-def get_pb_arg_valstrings(pb, arg_name, default_val):
- arg = get_pb_arg(pb, arg_name)
- return list(arg.strings) if arg is not None else default_val
-
-
-def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
- arg = get_pb_arg(pb, arg_name)
- if arg is None:
- arg = putils.MakeArgument(arg_name, arg_value)
- assert hasattr(arg, arg_attr)
- pb.arg.extend([arg])
- if allow_override and getattr(arg, arg_attr) != arg_value:
- logger.warning(
- "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value)
- )
- setattr(arg, arg_attr, arg_value)
- else:
- assert arg is not None
- assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format(
- getattr(arg, arg_attr), arg_value
- )
-
-
-def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
- assert type(tensor) == np.ndarray
- kTypeNameMapper = {
- np.dtype("float32"): "GivenTensorFill",
- np.dtype("int32"): "GivenTensorIntFill",
- np.dtype("int64"): "GivenTensorInt64Fill",
- np.dtype("uint8"): "GivenTensorStringFill",
- }
-
- args_dict = {}
- if tensor.dtype == np.dtype("uint8"):
- args_dict.update({"values": [str(tensor.data)], "shape": [1]})
- else:
- args_dict.update({"values": tensor, "shape": tensor.shape})
-
- if device_option is not None:
- args_dict["device_option"] = device_option
-
- return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict)
-
-
-def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
- assert type(int8_tensor) == workspace.Int8Tensor
- kTypeNameMapper = {
- np.dtype("int32"): "Int8GivenIntTensorFill",
- np.dtype("uint8"): "Int8GivenTensorFill",
- }
-
- tensor = int8_tensor.data
- assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
- values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
-
- return core.CreateOperator(
- kTypeNameMapper[tensor.dtype],
- [],
- [name],
- values=values,
- shape=tensor.shape,
- Y_scale=int8_tensor.scale,
- Y_zero_point=int8_tensor.zero_point,
- )
-
-
-def create_const_fill_op(
- name: str,
- blob: Union[np.ndarray, workspace.Int8Tensor],
- device_option: Optional[caffe2_pb2.DeviceOption] = None,
-) -> caffe2_pb2.OperatorDef:
- """
- Given a blob object, return the Caffe2 operator that creates this blob
-    as constant. Currently supports NumPy tensors and Caffe2 Int8Tensors.
- """
-
- tensor_type = type(blob)
- assert tensor_type in [
- np.ndarray,
- workspace.Int8Tensor,
- ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
- name, type(blob)
- )
-
- if tensor_type == np.ndarray:
- return _create_const_fill_op_from_numpy(name, blob, device_option)
- elif tensor_type == workspace.Int8Tensor:
- assert device_option is None
- return _create_const_fill_op_from_c2_int8_tensor(name, blob)
-
-
-def construct_init_net_from_params(
- params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
-) -> caffe2_pb2.NetDef:
- """
- Construct the init_net from params dictionary
- """
- init_net = caffe2_pb2.NetDef()
- device_options = device_options or {}
- for name, blob in params.items():
- if isinstance(blob, str):
- logger.warning(
- (
- "Blob {} with type {} is not supported in generating init net,"
- " skipped.".format(name, type(blob))
- )
- )
- continue
- init_net.op.extend(
- [create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
- )
- init_net.external_output.append(name)
- return init_net
-
-
-def get_producer_map(ssa):
- """
- Return dict from versioned blob to (i, j),
- where i is index of producer op, j is the index of output of that op.
- """
- producer_map = {}
- for i in range(len(ssa)):
- outputs = ssa[i][1]
- for j, outp in enumerate(outputs):
- producer_map[outp] = (i, j)
- return producer_map
-
-
-def get_consumer_map(ssa):
- """
- Return dict from versioned blob to list of (i, j),
- where i is index of consumer op, j is the index of input of that op.
- """
- consumer_map = collections.defaultdict(list)
- for i in range(len(ssa)):
- inputs = ssa[i][0]
- for j, inp in enumerate(inputs):
- consumer_map[inp].append((i, j))
- return consumer_map
-
-
-def get_params_from_init_net(
- init_net: caffe2_pb2.NetDef,
-) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
- """
- Take the output blobs from init_net by running it.
- Outputs:
- params: dict from blob name to numpy array
- device_options: dict from blob name to the device option of its creating op
- """
-    # NOTE: this assumes that each param is determined by its producer op, with
-    # the only exception being CopyGPUToCPU, which is a CUDA op but returns a CPU tensor.
- def _get_device_option(producer_op):
- if producer_op.type == "CopyGPUToCPU":
- return caffe2_pb2.DeviceOption()
- else:
- return producer_op.device_option
-
- with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
- ws.RunNetOnce(init_net)
- params = {b: fetch_any_blob(b) for b in init_net.external_output}
- ssa, versions = core.get_ssa(init_net)
- producer_map = get_producer_map(ssa)
- device_options = {
- b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
- for b in init_net.external_output
- }
- return params, device_options
-
-
-def _updater_raise(op, input_types, output_types):
- raise RuntimeError(
- "Failed to apply updater for op {} given input_types {} and"
- " output_types {}".format(op, input_types, output_types)
- )
-
-
-def _generic_status_identifier(
- predict_net: caffe2_pb2.NetDef,
- status_updater: Callable,
- known_status: Dict[Tuple[str, int], Any],
-) -> Dict[Tuple[str, int], Any]:
- """
- Statically infer the status of each blob, the status can be such as device type
- (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here
- is versioned blob (Tuple[str, int]) in the format compatible with ssa.
- Inputs:
- predict_net: the caffe2 network
- status_updater: a callable, given an op and the status of its input/output,
- it returns the updated status of input/output. `None` is used for
- representing unknown status.
- known_status: a dict containing known status, used as initialization.
- Outputs:
- A dict mapping from versioned blob to its status
- """
- ssa, versions = core.get_ssa(predict_net)
- versioned_ext_input = [(b, 0) for b in predict_net.external_input]
- versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
- all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
-
- allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
- assert all(k in allowed_vbs for k in known_status)
- assert all(v is not None for v in known_status.values())
- _known_status = copy.deepcopy(known_status)
-
- def _check_and_update(key, value):
- assert value is not None
- if key in _known_status:
- if not _known_status[key] == value:
- raise RuntimeError(
-                    "Conflicting status for {}, existing status {}, new status {}".format(
- key, _known_status[key], value
- )
- )
- _known_status[key] = value
-
- def _update_i(op, ssa_i):
- versioned_inputs = ssa_i[0]
- versioned_outputs = ssa_i[1]
-
- inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
- outputs_status = [_known_status.get(b, None) for b in versioned_outputs]
-
- new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)
-
- for versioned_blob, status in zip(
- versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
- ):
- if status is not None:
- _check_and_update(versioned_blob, status)
-
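-    # propagate status in a forward pass over the ops, then a backward pass,
-    # so known status can flow from inputs to outputs and vice versa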
- for op, ssa_i in zip(predict_net.op, ssa):
- _update_i(op, ssa_i)
- for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
- _update_i(op, ssa_i)
-
-    # NOTE: This strictly checks that every blob from predict_net is assigned
-    # a known status. However, sometimes that's impossible (e.g. with a dead-end
-    # op); we may relax this constraint if needed.
- for k in all_versioned_blobs:
- if k not in _known_status:
- raise NotImplementedError(
- "Can not infer the status for {}. Currently only support the case where"
- " a single forward and backward pass can identify status for all blobs.".format(k)
- )
-
- return _known_status
-
-
-def infer_device_type(
- predict_net: caffe2_pb2.NetDef,
- known_status: Dict[Tuple[str, int], Any],
- device_name_style: str = "caffe2",
-) -> Dict[Tuple[str, int], str]:
- """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob"""
-
- assert device_name_style in ["caffe2", "pytorch"]
- _CPU_STR = "cpu"
- _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"
-
- def _copy_cpu_to_gpu_updater(op, input_types, output_types):
- if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
- _updater_raise(op, input_types, output_types)
- return ([_CPU_STR], [_GPU_STR])
-
- def _copy_gpu_to_cpu_updater(op, input_types, output_types):
- if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
- _updater_raise(op, input_types, output_types)
- return ([_GPU_STR], [_CPU_STR])
-
- def _other_ops_updater(op, input_types, output_types):
- non_none_types = [x for x in input_types + output_types if x is not None]
- if len(non_none_types) > 0:
- the_type = non_none_types[0]
- if not all(x == the_type for x in non_none_types):
- _updater_raise(op, input_types, output_types)
- else:
- the_type = None
- return ([the_type for _ in op.input], [the_type for _ in op.output])
-
- def _device_updater(op, *args, **kwargs):
- return {
- "CopyCPUToGPU": _copy_cpu_to_gpu_updater,
- "CopyGPUToCPU": _copy_gpu_to_cpu_updater,
- }.get(op.type, _other_ops_updater)(op, *args, **kwargs)
-
- return _generic_status_identifier(predict_net, _device_updater, known_status)
-
-
-# ==== torch/utils_caffe2/vis.py ===============================================
-
-
-def _modify_blob_names(ops, blob_rename_f):
- ret = []
-
- def _replace_list(blob_list, replaced_list):
- del blob_list[:]
- blob_list.extend(replaced_list)
-
- for x in ops:
- cur = copy.deepcopy(x)
- _replace_list(cur.input, list(map(blob_rename_f, cur.input)))
- _replace_list(cur.output, list(map(blob_rename_f, cur.output)))
- ret.append(cur)
-
- return ret
-
-
-def _rename_blob(name, blob_sizes, blob_ranges):
- def _list_to_str(bsize):
- ret = ", ".join([str(x) for x in bsize])
- ret = "[" + ret + "]"
- return ret
-
- ret = name
- if blob_sizes is not None and name in blob_sizes:
- ret += "\n" + _list_to_str(blob_sizes[name])
- if blob_ranges is not None and name in blob_ranges:
- ret += "\n" + _list_to_str(blob_ranges[name])
-
- return ret
-
-
-# graph_name must not contain the word 'graph'
-def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None):
- blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges)
- return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f)
-
-
-def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None):
- graph = None
- ops = net.op
- if blob_rename_func is not None:
- ops = _modify_blob_names(ops, blob_rename_func)
- if not op_only:
- graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
- else:
- graph = net_drawer.GetPydotGraphMinimal(
- ops, graph_name, rankdir="TB", minimal_dependency=True
- )
-
- try:
- par_dir = os.path.dirname(file_name)
- if not os.path.exists(par_dir):
- os.makedirs(par_dir)
-
- format = os.path.splitext(os.path.basename(file_name))[-1]
- if format == ".png":
- graph.write_png(file_name)
- elif format == ".pdf":
- graph.write_pdf(file_name)
- elif format == ".svg":
- graph.write_svg(file_name)
- else:
- print("Incorrect format {}".format(format))
- except Exception as e:
- print("Error when writing graph to image {}".format(e))
-
- return graph
-
-
-# ==== torch/utils_toffee/aten_to_caffe2.py ====================================
-
-
-def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
- """
- For ONNX exported model, GroupNorm will be represented as ATen op,
- this can be a drop in replacement from ATen to GroupNorm
- """
- count = 0
- for op in predict_net.op:
- if op.type == "ATen":
- op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3
- if op_name and op_name.decode() == "group_norm":
- op.arg.remove(get_pb_arg(op, "operator"))
-
- if get_pb_arg_vali(op, "cudnn_enabled", None):
- op.arg.remove(get_pb_arg(op, "cudnn_enabled"))
-
- num_groups = get_pb_arg_vali(op, "num_groups", None)
- if num_groups is not None:
- op.arg.remove(get_pb_arg(op, "num_groups"))
- check_set_pb_arg(op, "group", "i", num_groups)
-
- op.type = "GroupNorm"
- count += 1
-    if count > 0:
-        logger.info("Replaced {} ATen operator(s) with GroupNorm".format(count))
-
-
-# ==== torch/utils_toffee/alias.py =============================================
-
-
-def alias(x, name, is_backward=False):
- if not torch.onnx.is_in_onnx_export():
- return x
- assert isinstance(x, torch.Tensor)
- return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
-
-
-def fuse_alias_placeholder(predict_net, init_net):
- """Remove AliasWithName placeholder and rename the input/output of it"""
- # First we finish all the re-naming
- for i, op in enumerate(predict_net.op):
- if op.type == "AliasWithName":
- assert len(op.input) == 1
- assert len(op.output) == 1
- name = get_pb_arg_vals(op, "name", None).decode()
- is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
- rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward)
- rename_op_output(predict_net, i, 0, name)
-
- # Remove AliasWithName, should be very safe since it's a non-op
- new_ops = []
- for op in predict_net.op:
- if op.type != "AliasWithName":
- new_ops.append(op)
- else:
- # safety check
- assert op.input == op.output
- assert op.input[0] == op.arg[0].s.decode()
- del predict_net.op[:]
- predict_net.op.extend(new_ops)
-
-
-# ==== torch/utils_caffe2/graph_transform.py ===================================
-
-
-class IllegalGraphTransformError(ValueError):
- """When a graph transform function call can't be executed."""
-
-
-def _rename_versioned_blob_in_proto(
- proto: caffe2_pb2.NetDef,
- old_name: str,
- new_name: str,
- version: int,
- ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
- start_versions: Dict[str, int],
- end_versions: Dict[str, int],
-):
- """In given proto, rename all blobs with matched version"""
-    # Operator list
- for op, i_th_ssa in zip(proto.op, ssa):
- versioned_inputs, versioned_outputs = i_th_ssa
- for i in range(len(op.input)):
- if versioned_inputs[i] == (old_name, version):
- op.input[i] = new_name
- for i in range(len(op.output)):
- if versioned_outputs[i] == (old_name, version):
- op.output[i] = new_name
- # external_input
- if start_versions.get(old_name, 0) == version:
- for i in range(len(proto.external_input)):
- if proto.external_input[i] == old_name:
- proto.external_input[i] = new_name
- # external_output
- if end_versions.get(old_name, 0) == version:
- for i in range(len(proto.external_output)):
- if proto.external_output[i] == old_name:
- proto.external_output[i] = new_name
-
-
-def rename_op_input(
- predict_net: caffe2_pb2.NetDef,
- init_net: caffe2_pb2.NetDef,
- op_id: int,
- input_id: int,
- new_name: str,
- from_producer: bool = False,
-):
- """
-    Rename the op_id-th operator in predict_net, changing its input_id-th input's
-    name to new_name. It also automatically re-routes and changes
-    external_input and init_net if necessary.
-    - It requires that the input is only consumed by this op.
-    - This function modifies predict_net and init_net in-place.
-    - When from_producer is enabled, this also updates other operators that consume
-    the same input. Be cautious, because this may trigger unintended behavior.
- """
- assert isinstance(predict_net, caffe2_pb2.NetDef)
- assert isinstance(init_net, caffe2_pb2.NetDef)
-
- init_net_ssa, init_net_versions = core.get_ssa(init_net)
- predict_net_ssa, predict_net_versions = core.get_ssa(
- predict_net, copy.deepcopy(init_net_versions)
- )
-
- versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
- old_name, version = versioned_inputs[input_id]
-
- if from_producer:
- producer_map = get_producer_map(predict_net_ssa)
- if not (old_name, version) in producer_map:
- raise NotImplementedError(
- "Can't find producer, the input {} is probably from"
- " init_net, this is not supported yet.".format(old_name)
- )
- producer = producer_map[(old_name, version)]
- rename_op_output(predict_net, producer[0], producer[1], new_name)
- return
-
- def contain_targets(op_ssa):
- return (old_name, version) in op_ssa[0]
-
- is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
- if sum(is_consumer) > 1:
- raise IllegalGraphTransformError(
- (
-                "Input '{}' of operator(#{}) is consumed by other ops, please use"
- + " rename_op_output on the producer instead. Offending op: \n{}"
- ).format(old_name, op_id, predict_net.op[op_id])
- )
-
- # update init_net
- _rename_versioned_blob_in_proto(
- init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions
- )
- # update predict_net
- _rename_versioned_blob_in_proto(
- predict_net,
- old_name,
- new_name,
- version,
- predict_net_ssa,
- init_net_versions,
- predict_net_versions,
- )
-
-
-def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str):
- """
-    Rename the op_id-th operator in predict_net, changing its output_id-th output's
-    name to new_name. It also automatically re-routes and changes
-    external_output if necessary.
- - It allows multiple consumers of its output.
- - This function modifies predict_net in-place, doesn't need init_net.
- """
- assert isinstance(predict_net, caffe2_pb2.NetDef)
-
- ssa, blob_versions = core.get_ssa(predict_net)
-
- versioned_inputs, versioned_outputs = ssa[op_id]
- old_name, version = versioned_outputs[output_id]
-
- # update predict_net
- _rename_versioned_blob_in_proto(
- predict_net, old_name, new_name, version, ssa, {}, blob_versions
- )
-
-
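-# Hypothetical usage sketch (illustrative only): rename the intermediate blob of a
-# tiny two-op net. `core` and `caffe2_pb2` are the caffe2 modules imported at the
-# top of this file; op and blob names are made up.
-_example_net = caffe2_pb2.NetDef()
-_example_net.op.extend([
- core.CreateOperator("Relu", ["x"], ["y"]),
- core.CreateOperator("Relu", ["y"], ["z"]),
-])
-_example_net.external_input.append("x")
-_example_net.external_output.append("z")
-# op#0's output and op#1's input both become "y_renamed"; external_* are untouched
-rename_op_output(_example_net, 0, 0, "y_renamed")
-assert _example_net.op[1].input[0] == "y_renamed"
-
-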
-def get_sub_graph_external_input_output(
- predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
-) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
- """
- Return the list of external input/output of sub-graph,
- each element is tuple of the name and corresponding version in predict_net.
-
- External input/output are defined the same way as in a caffe2 NetDef.
- """
- ssa, versions = core.get_ssa(predict_net)
-
- all_inputs = []
- all_outputs = []
- for op_id in sub_graph_op_indices:
- all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs]
- all_outputs += list(ssa[op_id][1]) # ssa output won't repeat
-
- # for versioned blobs, external inputs are just those blob in all_inputs
- # but not in all_outputs
- ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]
-
- # external outputs are essentially outputs of this subgraph that are used
- # outside of this sub-graph (including predict_net.external_output)
- all_other_inputs = sum(
- (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices),
- [(outp, versions[outp]) for outp in predict_net.external_output],
- )
- ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)]
-
- return ext_inputs, ext_outputs
-
-
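-# Hypothetical usage sketch (illustrative only): for the chain x -Relu-> y -Relu-> z
-# with z in external_output, the sub-graph holding only op#0 has "x" as its external
-# input and "y" as its external output (since op#1 consumes it).
-_example_chain = caffe2_pb2.NetDef()
-_example_chain.op.extend([
- core.CreateOperator("Relu", ["x"], ["y"]),
- core.CreateOperator("Relu", ["y"], ["z"]),
-])
-_example_chain.external_output.append("z")
-assert get_sub_graph_external_input_output(_example_chain, [0]) == ([("x", 0)], [("y", 1)])
-
-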
-class DiGraph:
- """A DAG representation of caffe2 graph, each vertice is a versioned blob."""
-
- def __init__(self):
- self.vertices = set()
- self.graph = collections.defaultdict(list)
-
- def add_edge(self, u, v):
- self.graph[u].append(v)
- self.vertices.add(u)
- self.vertices.add(v)
-
- # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
- def get_all_paths(self, s, d):
- visited = {k: False for k in self.vertices}
- path = []
- all_paths = []
-
- def _get_all_paths_util(graph, u, d, visited, path):
- visited[u] = True
- path.append(u)
- if u == d:
- all_paths.append(copy.deepcopy(path))
- else:
- for i in graph[u]:
- if not visited[i]:
- _get_all_paths_util(graph, i, d, visited, path)
- path.pop()
- visited[u] = False
-
- _get_all_paths_util(self.graph, s, d, visited, path)
- return all_paths
-
- @staticmethod
- def from_ssa(ssa):
- graph = DiGraph()
- for op_id in range(len(ssa)):
- for inp in ssa[op_id][0]:
- for outp in ssa[op_id][1]:
- graph.add_edge(inp, outp)
- return graph
-
-
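-# Hypothetical usage sketch (illustrative only): enumerate all paths in a small DAG
-# whose vertices are versioned blobs.
-_example_dag = DiGraph()
-_example_dag.add_edge(("x", 0), ("y", 0))
-_example_dag.add_edge(("y", 0), ("z", 0))
-_example_dag.add_edge(("x", 0), ("z", 0))
-# two paths: x -> y -> z and x -> z
-assert len(_example_dag.get_all_paths(("x", 0), ("z", 0))) == 2
-
-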
-def _get_dependency_chain(ssa, versioned_target, versioned_source):
- """
- Return the list of indices of the operators needed to produce the target blob
- from the source blob; if there's no dependency, return an empty list.
- """
-
- # finding all paths between nodes can be O(N!), thus we only search within
- # the sub-graph spanning from the first consumer of the source blob
- # to the producer of the target blob.
- consumer_map = get_consumer_map(ssa)
- producer_map = get_producer_map(ssa)
- start_op = min(x[0] for x in consumer_map[versioned_source]) - 15
- end_op = (
- producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op
- )
- sub_graph_ssa = ssa[start_op : end_op + 1]
- if len(sub_graph_ssa) > 30:
- logger.warning(
- "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it"
- " might take non-trival time to find all paths between them.".format(
- versioned_source, versioned_target, start_op, end_op
- )
- )
-
- dag = DiGraph.from_ssa(sub_graph_ssa)
- paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends
- ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths]
- return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
-
-
-def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]:
- """
- Identify the reshape sub-graph in a protobuf.
- The reshape sub-graph is defined as matching the following pattern:
-
- (input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
- └-------------------------------------------> Reshape -> (output_blob)
-
- Return:
- List of sub-graphs, each sub-graph is represented as a list of indices
- of the relevant ops, [Op_1, Op_2, ..., Op_N, Reshape]
- """
-
- ssa, _ = core.get_ssa(predict_net)
-
- ret = []
- for i, op in enumerate(predict_net.op):
- if op.type == "Reshape":
- assert len(op.input) == 2
- input_ssa = ssa[i][0]
- data_source = input_ssa[0]
- shape_source = input_ssa[1]
- op_indices = _get_dependency_chain(ssa, shape_source, data_source)
- ret.append(op_indices + [i])
- return ret
-
-
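-# Hypothetical usage sketch (illustrative only): a Shape -> Reshape pair matches the
-# pattern above, so the pair is reported as the sub-graph [0, 1].
-_example_reshape = caffe2_pb2.NetDef()
-_example_reshape.op.extend([
- core.CreateOperator("Shape", ["x"], ["shp"]),
- core.CreateOperator("Reshape", ["x", "shp"], ["y", "old_shape"]),
-])
-assert identify_reshape_sub_graph(_example_reshape) == [[0, 1]]
-
-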
-def remove_reshape_for_fc(predict_net, params):
- """
- In PyTorch, nn.Linear has to take a 2D tensor, which often leads to reshaping
- a 4D tensor to 2D by calling .view(). However, this (dynamic) reshaping
- doesn't work well with ONNX and Int8 tools, and causes extra
- ops (e.g. ExpandDims) that might not be available on mobile.
- Luckily Caffe2 supports a 4D tensor for FC, so we can remove those reshapes
- after exporting the ONNX model.
- """
- from caffe2.python import core
-
- # find all reshape sub-graphs that can be removed, which for now means all
- # Reshape sub-graphs whose output is only consumed by FC ops.
- # TODO: to make it safer, we may need the actual value to better determine
- # if a Reshape before FC is removable.
- reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
- sub_graphs_to_remove = []
- for reshape_sub_graph in reshape_sub_graphs:
- reshape_op_id = reshape_sub_graph[-1]
- assert predict_net.op[reshape_op_id].type == "Reshape"
- ssa, _ = core.get_ssa(predict_net)
- reshape_output = ssa[reshape_op_id][1][0]
- consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
- if all(predict_net.op[consumer].type == "FC" for consumer in consumers):
- # safety check that the sub-graph is isolated; for this reshape sub-graph,
- # that means it has one non-param external input and one external output.
- ext_inputs, ext_outputs = get_sub_graph_external_input_output(
- predict_net, reshape_sub_graph
- )
- non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
- if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
- sub_graphs_to_remove.append(reshape_sub_graph)
-
- # remove each sub-graph by:
- # 1: renaming the Reshape's output to its input, so the sub-graph can be
- # seen as an in-place identity whose external input/output are the same.
- # 2: simply removing those ops.
- remove_op_ids = []
- params_to_remove = []
- for sub_graph in sub_graphs_to_remove:
- logger.info(
- "Remove Reshape sub-graph:\n{}".format(
- "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
- )
- )
- reshape_op_id = sub_graph[-1]
- new_reshape_output = predict_net.op[reshape_op_id].input[0]
- rename_op_output(predict_net, reshape_op_id, 0, new_reshape_output)
- ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
- non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
- params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
- assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
- assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
- assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
- remove_op_ids.extend(sub_graph)
- params_to_remove.extend(params_ext_inputs)
-
- predict_net = copy.deepcopy(predict_net)
- new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
- del predict_net.op[:]
- predict_net.op.extend(new_ops)
- for versioned_params in params_to_remove:
- name = versioned_params[0]
- logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
- del params[name]
- predict_net.external_input.remove(name)
-
- return predict_net, params
-
-
-def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
- """
- In-place fuse extra copy ops between cpu/gpu for the following case:
- a -CopyAToB-> b -CopyBToA-> c1 -NextOp1-> d1
- -CopyBToA-> c2 -NextOp2-> d2
- The fused network will look like:
- a -NextOp1-> d1
- -NextOp2-> d2
- """
-
- _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
-
- def _fuse_once(predict_net):
- ssa, blob_versions = core.get_ssa(predict_net)
- consumer_map = get_consumer_map(ssa)
- versioned_external_output = [
- (name, blob_versions[name]) for name in predict_net.external_output
- ]
-
- for op_id, op in enumerate(predict_net.op):
- if op.type in _COPY_OPS:
- fw_copy_versioned_output = ssa[op_id][1][0]
- consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
- reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
-
- is_fusable = (
- len(consumer_ids) > 0
- and fw_copy_versioned_output not in versioned_external_output
- and all(
- predict_net.op[_op_id].type == reverse_op_type
- and ssa[_op_id][1][0] not in versioned_external_output
- for _op_id in consumer_ids
- )
- )
-
- if is_fusable:
- for rv_copy_op_id in consumer_ids:
- # make each NextOp use "a" directly, then remove the Copy ops
- rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
- next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
- predict_net.op[next_op_id].input[inp_id] = op.input[0]
- # remove CopyOps
- new_ops = [
- op
- for i, op in enumerate(predict_net.op)
- if i != op_id and i not in consumer_ids
- ]
- del predict_net.op[:]
- predict_net.op.extend(new_ops)
- return True
-
- return False
-
- # _fuse_once returns False if nothing can be fused
- while _fuse_once(predict_net):
- pass
-
-
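-# Hypothetical usage sketch (illustrative only): the CPU->GPU->CPU round trip around
-# Relu is fused away, leaving a single Relu that reads "a" directly.
-_example_copy = caffe2_pb2.NetDef()
-_example_copy.op.extend([
- core.CreateOperator("CopyCPUToGPU", ["a"], ["b"]),
- core.CreateOperator("CopyGPUToCPU", ["b"], ["c"]),
- core.CreateOperator("Relu", ["c"], ["d"]),
-])
-_example_copy.external_output.append("d")
-fuse_copy_between_cpu_and_gpu(_example_copy)
-assert [op.type for op in _example_copy.op] == ["Relu"]
-
-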
-def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
- """remove ops if its output is not used or not in external_output"""
- ssa, versions = core.get_ssa(net_def)
- versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
- consumer_map = get_consumer_map(ssa)
- removed_op_ids = set()
-
- def _is_dead_end(versioned_blob):
- return not (
- versioned_blob in versioned_external_output
- or (
- len(consumer_map[versioned_blob]) > 0
- and all(x[0] not in removed_op_ids for x in consumer_map[versioned_blob])
- )
- )
-
- for i, ssa_i in reversed(list(enumerate(ssa))):
- versioned_outputs = ssa_i[1]
- if all(_is_dead_end(outp) for outp in versioned_outputs):
- removed_op_ids.add(i)
-
- # simply removing those dead-end ops should have no effect on external_output
- new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids]
- del net_def.op[:]
- net_def.op.extend(new_ops)
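-
-
-# Hypothetical usage sketch (illustrative only): op#1 produces a blob that nobody
-# reads, so it is removed; op#0 feeds external_output and is kept.
-_example_dead = caffe2_pb2.NetDef()
-_example_dead.op.extend([
- core.CreateOperator("Relu", ["x"], ["y"]),
- core.CreateOperator("Relu", ["y"], ["unused"]),
-])
-_example_dead.external_output.append("y")
-remove_dead_end_ops(_example_dead)
-assert len(_example_dead.op) == 1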
diff --git a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/tools.py b/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/tools.py
deleted file mode 100644
index 3c64ea62f3934b4c356bd7f29cd9c949f58d1050..0000000000000000000000000000000000000000
--- a/spaces/OpenMotionLab/MotionGPT/mGPT/render/blender/tools.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import bpy
-import numpy as np
-
-
-def style_detect(data):
- is_mesh = False
- is_smplx = False
- jointstyle = 'mmm'
- # heuristic
- if data.shape[1] > 1000:
- is_mesh = True
- if data.shape[1] == 10475:
- is_smplx = True
- if data.shape[1] == 22:
- jointstyle = 'humanml3d'
-
- return is_mesh, is_smplx, jointstyle
-
-
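-# Hypothetical usage sketch (illustrative only): a (frames, 22, 3) joint array is
-# detected as non-mesh, non-SMPL-X, humanml3d-style data.
-_example_motion = np.zeros((100, 22, 3))
-assert style_detect(_example_motion) == (False, False, 'humanml3d')
-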
-
- # see this gist for more explanation:
- # https://gist.github.com/iyadahmed/7c7c0fae03c40bd87e75dc7059e35377
- # This should be resolved in a newer version of Blender
-class ndarray_pydata(np.ndarray):
- def __bool__(self) -> bool:
- return len(self) > 0
-
-
-def load_numpy_vertices_into_blender(vertices, faces, name, mat):
- mesh = bpy.data.meshes.new(name)
- mesh.from_pydata(vertices, [], faces.view(ndarray_pydata))
- mesh.validate()
-
- obj = bpy.data.objects.new(name, mesh)
- bpy.context.scene.collection.objects.link(obj)
-
- bpy.ops.object.select_all(action='DESELECT')
- obj.select_set(True)
- obj.active_material = mat
- bpy.context.view_layer.objects.active = obj
- bpy.ops.object.shade_smooth()
- bpy.ops.object.select_all(action='DESELECT')
- return True
-
-
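-# Hypothetical usage sketch (commented out, since it needs a live Blender scene):
-#
-# verts = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32)
-# faces = np.array([[0, 1, 2]], dtype=np.int32)
-# mat = bpy.data.materials.new("example_mat")
-# load_numpy_vertices_into_blender(verts, faces, "example_tri", mat)
-
-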
-def delete_objs(names):
- if not isinstance(names, list):
- names = [names]
- # bpy.ops.object.mode_set(mode='OBJECT')
- bpy.ops.object.select_all(action='DESELECT')
- for obj in bpy.context.scene.objects:
- for name in names:
- if obj.name.startswith(name) or obj.name.endswith(name):
- obj.select_set(True)
- bpy.ops.object.delete()
- bpy.ops.object.select_all(action='DESELECT')
diff --git a/spaces/PAIR/PAIR-Diffusion/ldm/modules/distributions/distributions.py b/spaces/PAIR/PAIR-Diffusion/ldm/modules/distributions/distributions.py
deleted file mode 100644
index f2b8ef901130efc171aa69742ca0244d94d3f2e9..0000000000000000000000000000000000000000
--- a/spaces/PAIR/PAIR-Diffusion/ldm/modules/distributions/distributions.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import torch
-import numpy as np
-
-
-class AbstractDistribution:
- def sample(self):
- raise NotImplementedError()
-
- def mode(self):
- raise NotImplementedError()
-
-
-class DiracDistribution(AbstractDistribution):
- def __init__(self, value):
- self.value = value
-
- def sample(self):
- return self.value
-
- def mode(self):
- return self.value
-
-
-class DiagonalGaussianDistribution(object):
- def __init__(self, parameters, deterministic=False):
- self.parameters = parameters
- self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
- self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
- self.deterministic = deterministic
- self.std = torch.exp(0.5 * self.logvar)
- self.var = torch.exp(self.logvar)
- if self.deterministic:
- self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
-
- def sample(self):
- x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
- return x
-
- def kl(self, other=None):
- if self.deterministic:
- return torch.Tensor([0.])
- else:
- if other is None:
- return 0.5 * torch.sum(torch.pow(self.mean, 2)
- + self.var - 1.0 - self.logvar,
- dim=[1, 2, 3])
- else:
- return 0.5 * torch.sum(
- torch.pow(self.mean - other.mean, 2) / other.var
- + self.var / other.var - 1.0 - self.logvar + other.logvar,
- dim=[1, 2, 3])
-
- def nll(self, sample, dims=[1,2,3]):
- if self.deterministic:
- return torch.Tensor([0.])
- logtwopi = np.log(2.0 * np.pi)
- return 0.5 * torch.sum(
- logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
- dim=dims)
-
- def mode(self):
- return self.mean
-
-
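-# Hypothetical usage sketch (illustrative only): 6 parameter channels are split into
-# a 3-channel mean and a 3-channel logvar for a batch of 2 latents.
-_example_params = torch.randn(2, 6, 4, 4)
-_example_posterior = DiagonalGaussianDistribution(_example_params)
-assert _example_posterior.sample().shape == (2, 3, 4, 4)
-assert _example_posterior.kl().shape == (2,) # KL to a standard normal, per sample
-
-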
-def normal_kl(mean1, logvar1, mean2, logvar2):
- """
- source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
- Compute the KL divergence between two gaussians.
- Shapes are automatically broadcasted, so batches can be compared to
- scalars, among other use cases.
- """
- tensor = None
- for obj in (mean1, logvar1, mean2, logvar2):
- if isinstance(obj, torch.Tensor):
- tensor = obj
- break
- assert tensor is not None, "at least one argument must be a Tensor"
-
- # Force variances to be Tensors. Broadcasting helps convert scalars to
- # Tensors, but it does not work for torch.exp().
- logvar1, logvar2 = [
- x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
- for x in (logvar1, logvar2)
- ]
-
- return 0.5 * (
- -1.0
- + logvar2
- - logvar1
- + torch.exp(logvar1 - logvar2)
- + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
- )
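-
-
-# Hypothetical usage sketch (illustrative only): scalars broadcast against tensors,
-# and identical Gaussians have zero KL.
-assert normal_kl(torch.zeros(4), torch.zeros(4), 0.0, 0.0).abs().sum() == 0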
diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/fileio/handlers/json_handler.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/fileio/handlers/json_handler.py
deleted file mode 100644
index 18d4f15f74139d20adff18b20be5529c592a66b6..0000000000000000000000000000000000000000
--- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/fileio/handlers/json_handler.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import json
-
-import numpy as np
-
-from .base import BaseFileHandler
-
-
-def set_default(obj):
- """Set default json values for non-serializable values.
-
- It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list.
- It also converts ``np.generic`` (including ``np.int32``, ``np.float32``,
- etc.) into plain numbers of plain python built-in types.
- """
- if isinstance(obj, (set, range)):
- return list(obj)
- elif isinstance(obj, np.ndarray):
- return obj.tolist()
- elif isinstance(obj, np.generic):
- return obj.item()
- raise TypeError(f'{type(obj)} is unsupported for json dump')
-
-
-class JsonHandler(BaseFileHandler):
-
- def load_from_fileobj(self, file):
- return json.load(file)
-
- def dump_to_fileobj(self, obj, file, **kwargs):
- kwargs.setdefault('default', set_default)
- json.dump(obj, file, **kwargs)
-
- def dump_to_str(self, obj, **kwargs):
- kwargs.setdefault('default', set_default)
- return json.dumps(obj, **kwargs)
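-
-
-# Hypothetical usage sketch (illustrative only): numpy values and sets are made
-# JSON-serializable by the ``set_default`` hook.
-assert json.loads(JsonHandler().dump_to_str({"ids": np.arange(3)})) == {"ids": [0, 1, 2]}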
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/receive.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/receive.go
deleted file mode 100644
index 7ebaa9ee2611b120d06c5a99ea2871f36c1ed7be..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/ice-9/receive.go and /dev/null differ
diff --git a/spaces/Persival123/thisisitboiiii/README.md b/spaces/Persival123/thisisitboiiii/README.md
deleted file mode 100644
index 0d5a34df745550576fd4106400638c2afa0bac76..0000000000000000000000000000000000000000
--- a/spaces/Persival123/thisisitboiiii/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Thisisitboiiii
-emoji: 🐨
-colorFrom: gray
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.26.0
-app_file: app.py
-pinned: false
-license: artistic-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/vg/__init__.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/vg/__init__.py
deleted file mode 100644
index a07b0bc1263aba5428c74ff392726e7d4299ef78..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/vg/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import logging
-
-from .vg_eval import do_vg_evaluation
-
-
-def vg_evaluation(dataset, predictions, output_folder, box_only, eval_attributes=False, **_):
- logger = logging.getLogger("maskrcnn_benchmark.inference")
- logger.info("performing vg evaluation, ignored iou_types.")
- return do_vg_evaluation(
- dataset=dataset,
- predictions=predictions,
- output_folder=output_folder,
- box_only=box_only,
- eval_attributes=eval_attributes,
- logger=logger,
- )
diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/optim/cosine_lr_scheduler.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/optim/cosine_lr_scheduler.py
deleted file mode 100644
index 1e4f0bbf28f1ad893a301f1bfac1da8e97370337..0000000000000000000000000000000000000000
--- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/optim/cosine_lr_scheduler.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-from torch.optim import Optimizer
-from torch.optim.lr_scheduler import _LRScheduler
-
-
-class CosineLRScheduler(_LRScheduler):
- """Cosine LR scheduler.
-
- Args:
- optimizer (Optimizer): Torch optimizer.
- warmup_steps (int): Number of warmup steps.
- total_steps (int): Total number of steps.
- lr_min_ratio (float): Minimum learning rate, as a ratio of the base LR.
- cycle_length (float): Cycle length.
- """
- def __init__(self, optimizer: Optimizer, total_steps: int, warmup_steps: int,
- lr_min_ratio: float = 0.0, cycle_length: float = 1.0):
- self.warmup_steps = warmup_steps
- assert self.warmup_steps >= 0
- self.total_steps = total_steps
- assert self.total_steps >= 0
- self.lr_min_ratio = lr_min_ratio
- self.cycle_length = cycle_length
- super().__init__(optimizer)
-
- def _get_sched_lr(self, lr: float, step: int):
- if step < self.warmup_steps:
- lr_ratio = step / self.warmup_steps
- lr = lr_ratio * lr
- elif step <= self.total_steps:
- s = (step - self.warmup_steps) / (self.total_steps - self.warmup_steps)
- lr_ratio = self.lr_min_ratio + 0.5 * (1 - self.lr_min_ratio) * \
- (1. + math.cos(math.pi * s / self.cycle_length))
- lr = lr_ratio * lr
- else:
- lr_ratio = self.lr_min_ratio
- lr = lr_ratio * lr
- return lr
-
- def get_lr(self):
- return [self._get_sched_lr(lr, self.last_epoch) for lr in self.base_lrs]
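-
-
-# Hypothetical usage sketch (illustrative only): the LR ramps up linearly for the
-# first 100 steps, then decays along a cosine toward lr_min_ratio * base_lr.
-import torch
-
-_example_opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1.0)
-_example_sched = CosineLRScheduler(_example_opt, total_steps=1000, warmup_steps=100)
-for _ in range(100):
- _example_opt.step()
- _example_sched.step()
-assert abs(_example_sched.get_lr()[0] - 1.0) < 1e-6 # warmup just completed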
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/locations/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/locations/__init__.py
deleted file mode 100644
index 60afe0a73b8d9e09b27a56a9132bdc7f70b84fe1..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/locations/__init__.py
+++ /dev/null
@@ -1,528 +0,0 @@
-import functools
-import logging
-import os
-import pathlib
-import sys
-import sysconfig
-from typing import Any, Dict, Generator, List, Optional, Tuple
-
-from pip._internal.models.scheme import SCHEME_KEYS, Scheme
-from pip._internal.utils.compat import WINDOWS
-from pip._internal.utils.deprecation import deprecated
-from pip._internal.utils.virtualenv import running_under_virtualenv
-
-from . import _sysconfig
-from .base import (
- USER_CACHE_DIR,
- get_major_minor_version,
- get_src_prefix,
- is_osx_framework,
- site_packages,
- user_site,
-)
-
-__all__ = [
- "USER_CACHE_DIR",
- "get_bin_prefix",
- "get_bin_user",
- "get_major_minor_version",
- "get_platlib",
- "get_prefixed_libs",
- "get_purelib",
- "get_scheme",
- "get_src_prefix",
- "site_packages",
- "user_site",
-]
-
-
-logger = logging.getLogger(__name__)
-
-
-_PLATLIBDIR: str = getattr(sys, "platlibdir", "lib")
-
-_USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10)
-
-
-def _should_use_sysconfig() -> bool:
- """This function determines the value of _USE_SYSCONFIG.
-
- By default, pip uses sysconfig on Python 3.10+.
- But Python distributors can override this decision by setting:
- sysconfig._PIP_USE_SYSCONFIG = True / False
- Rationale in https://github.com/pypa/pip/issues/10647
-
- This is a function for testability, but should be constant during any one
- run.
- """
- return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT))
-
-
-_USE_SYSCONFIG = _should_use_sysconfig()
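-
-# Hypothetical usage sketch (commented out; it would change pip's behavior): a
-# distributor can opt out of sysconfig before this module is imported, e.g. from a
-# sitecustomize patch:
-#
-# import sysconfig
-# sysconfig._PIP_USE_SYSCONFIG = False # force the legacy distutils code paths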
-
-if not _USE_SYSCONFIG:
- # Import distutils lazily to avoid deprecation warnings,
- # but import it soon enough that it is in memory and available during
- # a pip reinstall.
- from . import _distutils
-
-# Be noisy about incompatibilities if this platform "should" be using
-# sysconfig, but is explicitly opting out and using distutils instead.
-if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG:
- _MISMATCH_LEVEL = logging.WARNING
-else:
- _MISMATCH_LEVEL = logging.DEBUG
-
-
-def _looks_like_bpo_44860() -> bool:
- """The resolution to bpo-44860 will change this incorrect platlib.
-
- See <https://bugs.python.org/issue44860>.
- """
- from distutils.command.install import INSTALL_SCHEMES
-
- try:
- unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"]
- except KeyError:
- return False
- return unix_user_platlib == "$usersite"
-
-
-def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool:
- platlib = scheme["platlib"]
- if "/$platlibdir/" in platlib:
- platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/")
- if "/lib64/" not in platlib:
- return False
- unpatched = platlib.replace("/lib64/", "/lib/")
- return unpatched.replace("$platbase/", "$base/") == scheme["purelib"]
-
-
-@functools.lru_cache(maxsize=None)
-def _looks_like_red_hat_lib() -> bool:
- """Red Hat patches platlib in unix_prefix and unix_home, but not purelib.
-
- This is the only way I can see to tell a Red Hat-patched Python.
- """
- from distutils.command.install import INSTALL_SCHEMES
-
- return all(
- k in INSTALL_SCHEMES
- and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k])
- for k in ("unix_prefix", "unix_home")
- )
-
-
-@functools.lru_cache(maxsize=None)
-def _looks_like_debian_scheme() -> bool:
- """Debian adds two additional schemes."""
- from distutils.command.install import INSTALL_SCHEMES
-
- return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES
-
-
-@functools.lru_cache(maxsize=None)
-def _looks_like_red_hat_scheme() -> bool:
- """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.
-
- Red Hat's ``00251-change-user-install-location.patch`` changes the install
- command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is
- (fortunately?) done quite unconditionally, so we create a default command
- object without any configuration to detect this.
- """
- from distutils.command.install import install
- from distutils.dist import Distribution
-
- cmd: Any = install(Distribution())
- cmd.finalize_options()
- return (
- cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
- and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
- )
-
-
-@functools.lru_cache(maxsize=None)
-def _looks_like_slackware_scheme() -> bool:
- """Slackware patches sysconfig but fails to patch distutils and site.
-
- Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib
- path, but does not do the same to the site module.
- """
- if user_site is None: # User-site not available.
- return False
- try:
- paths = sysconfig.get_paths(scheme="posix_user", expand=False)
- except KeyError: # User-site not available.
- return False
- return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site
-
-
-@functools.lru_cache(maxsize=None)
-def _looks_like_msys2_mingw_scheme() -> bool:
- """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme.
-
- However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. The fix is
- likely going to be included in their 3.10 release, so we ignore the warning.
- See msys2/MINGW-packages#9319.
-
- MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase,
- and is missing the final ``"site-packages"``.
- """
- paths = sysconfig.get_paths("nt", expand=False)
- return all(
- "Lib" not in p and "lib" in p and not p.endswith("site-packages")
- for p in (paths[key] for key in ("platlib", "purelib"))
- )
-
-
-def _fix_abiflags(parts: Tuple[str]) -> Generator[str, None, None]:
- ldversion = sysconfig.get_config_var("LDVERSION")
- abiflags = getattr(sys, "abiflags", None)
-
- # LDVERSION does not end with sys.abiflags. Just return the path unchanged.
- if not ldversion or not abiflags or not ldversion.endswith(abiflags):
- yield from parts
- return
-
- # Strip sys.abiflags from LDVERSION-based path components.
- for part in parts:
- if part.endswith(ldversion):
- part = part[: (0 - len(abiflags))]
- yield part
-
-
-@functools.lru_cache(maxsize=None)
-def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None:
- issue_url = "https://github.com/pypa/pip/issues/10151"
- message = (
- "Value for %s does not match. Please report this to <%s>"
- "\ndistutils: %s"
- "\nsysconfig: %s"
- )
- logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new)
-
-
-def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool:
- if old == new:
- return False
- _warn_mismatched(old, new, key=key)
- return True
-
-
-@functools.lru_cache(maxsize=None)
-def _log_context(
- *,
- user: bool = False,
- home: Optional[str] = None,
- root: Optional[str] = None,
- prefix: Optional[str] = None,
-) -> None:
- parts = [
- "Additional context:",
- "user = %r",
- "home = %r",
- "root = %r",
- "prefix = %r",
- ]
-
- logger.log(_MISMATCH_LEVEL, "\n".join(parts), user, home, root, prefix)
-
-
-def get_scheme(
- dist_name: str,
- user: bool = False,
- home: Optional[str] = None,
- root: Optional[str] = None,
- isolated: bool = False,
- prefix: Optional[str] = None,
-) -> Scheme:
- new = _sysconfig.get_scheme(
- dist_name,
- user=user,
- home=home,
- root=root,
- isolated=isolated,
- prefix=prefix,
- )
- if _USE_SYSCONFIG:
- return new
-
- old = _distutils.get_scheme(
- dist_name,
- user=user,
- home=home,
- root=root,
- isolated=isolated,
- prefix=prefix,
- )
-
- warning_contexts = []
- for k in SCHEME_KEYS:
- old_v = pathlib.Path(getattr(old, k))
- new_v = pathlib.Path(getattr(new, k))
-
- if old_v == new_v:
- continue
-
- # distutils incorrectly put PyPy packages under ``site-packages/python``
- # in the ``posix_home`` scheme, but PyPy devs said they expect the
- # directory name to be ``pypy`` instead. So we treat this as a bug fix
- # and not warn about it. See bpo-43307 and python/cpython#24628.
- skip_pypy_special_case = (
- sys.implementation.name == "pypy"
- and home is not None
- and k in ("platlib", "purelib")
- and old_v.parent == new_v.parent
- and old_v.name.startswith("python")
- and new_v.name.startswith("pypy")
- )
- if skip_pypy_special_case:
- continue
-
- # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in
- # the ``include`` value, but distutils's ``headers`` does. We'll let
- # CPython decide whether this is a bug or feature. See bpo-43948.
- skip_osx_framework_user_special_case = (
- user
- and is_osx_framework()
- and k == "headers"
- and old_v.parent.parent == new_v.parent
- and old_v.parent.name.startswith("python")
- )
- if skip_osx_framework_user_special_case:
- continue
-
- # On Red Hat and derived Linux distributions, distutils is patched to
- # use "lib64" instead of "lib" for platlib.
- if k == "platlib" and _looks_like_red_hat_lib():
- continue
-
- # On Python 3.9+, sysconfig's posix_user scheme sets platlib against
- # sys.platlibdir, but distutils's unix_user incorrectly continues
- # using the same $usersite for both platlib and purelib. This creates a
- # mismatch when sys.platlibdir is not "lib".
- skip_bpo_44860 = (
- user
- and k == "platlib"
- and not WINDOWS
- and sys.version_info >= (3, 9)
- and _PLATLIBDIR != "lib"
- and _looks_like_bpo_44860()
- )
- if skip_bpo_44860:
- continue
-
- # Slackware incorrectly patches posix_user to use lib64 instead of lib,
- # but not usersite to match the location.
- skip_slackware_user_scheme = (
- user
- and k in ("platlib", "purelib")
- and not WINDOWS
- and _looks_like_slackware_scheme()
- )
- if skip_slackware_user_scheme:
- continue
-
- # Both Debian and Red Hat patch Python to place the system site under
- # /usr/local instead of /usr. Debian also places lib in dist-packages
- # instead of site-packages, but the /usr/local check should cover it.
- skip_linux_system_special_case = (
- not (user or home or prefix or running_under_virtualenv())
- and old_v.parts[1:3] == ("usr", "local")
- and len(new_v.parts) > 1
- and new_v.parts[1] == "usr"
- and (len(new_v.parts) < 3 or new_v.parts[2] != "local")
- and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme())
- )
- if skip_linux_system_special_case:
- continue
-
- # On Python 3.7 and earlier, sysconfig does not include sys.abiflags in
- # the "pythonX.Y" part of the path, but distutils does.
- skip_sysconfig_abiflag_bug = (
- sys.version_info < (3, 8)
- and not WINDOWS
- and k in ("headers", "platlib", "purelib")
- and tuple(_fix_abiflags(old_v.parts)) == new_v.parts
- )
- if skip_sysconfig_abiflag_bug:
- continue
-
- # MSYS2 MINGW's sysconfig patch does not include the "site-packages"
- # part of the path. This is incorrect and will be fixed in MSYS.
- skip_msys2_mingw_bug = (
- WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme()
- )
- if skip_msys2_mingw_bug:
- continue
-
- # CPython's POSIX install script invokes pip (via ensurepip) against the
- # interpreter located in the source tree, not the install site. This
- # triggers special logic in sysconfig that's not present in distutils.
- # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194
- skip_cpython_build = (
- sysconfig.is_python_build(check_home=True)
- and not WINDOWS
- and k in ("headers", "include", "platinclude")
- )
- if skip_cpython_build:
- continue
-
- warning_contexts.append((old_v, new_v, f"scheme.{k}"))
-
- if not warning_contexts:
- return old
-
- # Check if this path mismatch is caused by distutils config files. Those
- # files will no longer work once we switch to sysconfig, so this raises a
- # deprecation message for them.
- default_old = _distutils.distutils_scheme(
- dist_name,
- user,
- home,
- root,
- isolated,
- prefix,
- ignore_config_files=True,
- )
- if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS):
- deprecated(
- reason=(
- "Configuring installation scheme with distutils config files "
- "is deprecated and will no longer work in the near future. If you "
- "are using a Homebrew or Linuxbrew Python, please see discussion "
- "at https://github.com/Homebrew/homebrew-core/issues/76621"
- ),
- replacement=None,
- gone_in=None,
- )
- return old
-
- # Post warnings about this mismatch so user can report them back.
- for old_v, new_v, key in warning_contexts:
- _warn_mismatched(old_v, new_v, key=key)
- _log_context(user=user, home=home, root=root, prefix=prefix)
-
- return old
-
-
-def get_bin_prefix() -> str:
- new = _sysconfig.get_bin_prefix()
- if _USE_SYSCONFIG:
- return new
-
- old = _distutils.get_bin_prefix()
- if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"):
- _log_context()
- return old
-
-
-def get_bin_user() -> str:
- return _sysconfig.get_scheme("", user=True).scripts
-
-
-def _looks_like_deb_system_dist_packages(value: str) -> bool:
- """Check if the value is Debian's APT-controlled dist-packages.
-
- Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the
- default package path controlled by APT, but does not patch ``sysconfig`` to
- do the same. This is similar to the bug worked around in ``get_scheme()``,
- but here the default is ``deb_system`` instead of ``unix_local``. Ultimately
- we can't do anything about this Debian bug, and this detection allows us to
- skip the warning when needed.
- """
- if not _looks_like_debian_scheme():
- return False
- if value == "/usr/lib/python3/dist-packages":
- return True
- return False
-
-
-def get_purelib() -> str:
- """Return the default pure-Python lib location."""
- new = _sysconfig.get_purelib()
- if _USE_SYSCONFIG:
- return new
-
- old = _distutils.get_purelib()
- if _looks_like_deb_system_dist_packages(old):
- return old
- if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"):
- _log_context()
- return old
-
-
-def get_platlib() -> str:
- """Return the default platform-shared lib location."""
- new = _sysconfig.get_platlib()
- if _USE_SYSCONFIG:
- return new
-
- from . import _distutils
-
- old = _distutils.get_platlib()
- if _looks_like_deb_system_dist_packages(old):
- return old
- if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"):
- _log_context()
- return old
-
-
-def _deduplicated(v1: str, v2: str) -> List[str]:
- """Deduplicate values from a list."""
- if v1 == v2:
- return [v1]
- return [v1, v2]
-
-
-def _looks_like_apple_library(path: str) -> bool:
- """Apple patches sysconfig to *always* look under */Library/Python*."""
- if sys.platform[:6] != "darwin":
- return False
- return path == f"/Library/Python/{get_major_minor_version()}/site-packages"
-
-
-def get_prefixed_libs(prefix: str) -> List[str]:
- """Return the lib locations under ``prefix``."""
- new_pure, new_plat = _sysconfig.get_prefixed_libs(prefix)
- if _USE_SYSCONFIG:
- return _deduplicated(new_pure, new_plat)
-
- old_pure, old_plat = _distutils.get_prefixed_libs(prefix)
- old_lib_paths = _deduplicated(old_pure, old_plat)
-
- # Apple's Python (shipped with Xcode and Command Line Tools) hard-code
- # platlib and purelib to '/Library/Python/X.Y/site-packages'. This will
- # cause serious build isolation bugs when Apple starts shipping 3.10 because
- # pip will install build backends to the wrong location. This tells users
- # who is at fault so Apple may notice it and fix the issue in time.
- if all(_looks_like_apple_library(p) for p in old_lib_paths):
- deprecated(
- reason=(
- "Python distributed by Apple's Command Line Tools incorrectly "
- "patches sysconfig to always point to '/Library/Python'. This "
- "will cause build isolation to operate incorrectly on Python "
- "3.10 or later. Please help report this to Apple so they can "
- "fix this. https://developer.apple.com/bug-reporting/"
- ),
- replacement=None,
- gone_in=None,
- )
- return old_lib_paths
-
- warned = [
- _warn_if_mismatch(
- pathlib.Path(old_pure),
- pathlib.Path(new_pure),
- key="prefixed-purelib",
- ),
- _warn_if_mismatch(
- pathlib.Path(old_plat),
- pathlib.Path(new_plat),
- key="prefixed-platlib",
- ),
- ]
- if any(warned):
- _log_context(prefix=prefix)
-
- return old_lib_paths
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/__init__.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_internal/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/core.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/core.py
deleted file mode 100644
index 9acba3f3e984b404f52702964805732f03965048..0000000000000000000000000000000000000000
--- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/core.py
+++ /dev/null
@@ -1,5814 +0,0 @@
-#
-# core.py
-#
-import os
-import typing
-from typing import (
- NamedTuple,
- Union,
- Callable,
- Any,
- Generator,
- Tuple,
- List,
- TextIO,
- Set,
- Sequence,
-)
-from abc import ABC, abstractmethod
-from enum import Enum
-import string
-import copy
-import warnings
-import re
-import sys
-from collections.abc import Iterable
-import traceback
-import types
-from operator import itemgetter
-from functools import wraps
-from threading import RLock
-from pathlib import Path
-
-from .util import (
- _FifoCache,
- _UnboundedCache,
- __config_flags,
- _collapse_string_to_ranges,
- _escape_regex_range_chars,
- _bslash,
- _flatten,
- LRUMemo as _LRUMemo,
- UnboundedMemo as _UnboundedMemo,
-)
-from .exceptions import *
-from .actions import *
-from .results import ParseResults, _ParseResultsWithOffset
-from .unicode import pyparsing_unicode
-
-_MAX_INT = sys.maxsize
-str_type: Tuple[type, ...] = (str, bytes)
-
-#
-# Copyright (c) 2003-2022 Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-
-if sys.version_info >= (3, 8):
- from functools import cached_property
-else:
-
- class cached_property:
- def __init__(self, func):
- self._func = func
-
- def __get__(self, instance, owner=None):
- ret = instance.__dict__[self._func.__name__] = self._func(instance)
- return ret
-
-
-class __compat__(__config_flags):
- """
- A cross-version compatibility configuration for pyparsing features that will be
- released in a future version. By setting values in this configuration to True,
- those features can be enabled in prior versions for compatibility development
- and testing.
-
- - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
- of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
- maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
- behavior
- """
-
- _type_desc = "compatibility"
-
- collect_all_And_tokens = True
-
- _all_names = [__ for __ in locals() if not __.startswith("_")]
- _fixed_names = """
- collect_all_And_tokens
- """.split()
-
-
-class __diag__(__config_flags):
- _type_desc = "diagnostic"
-
- warn_multiple_tokens_in_named_alternation = False
- warn_ungrouped_named_tokens_in_collection = False
- warn_name_set_on_empty_Forward = False
- warn_on_parse_using_empty_Forward = False
- warn_on_assignment_to_Forward = False
- warn_on_multiple_string_args_to_oneof = False
- warn_on_match_first_with_lshift_operator = False
- enable_debug_on_named_expressions = False
-
- _all_names = [__ for __ in locals() if not __.startswith("_")]
- _warning_names = [name for name in _all_names if name.startswith("warn")]
- _debug_names = [name for name in _all_names if name.startswith("enable_debug")]
-
- @classmethod
- def enable_all_warnings(cls) -> None:
- for name in cls._warning_names:
- cls.enable(name)
-
-
-class Diagnostics(Enum):
- """
- Diagnostic configuration (all default to disabled)
- - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
- name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
- - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
- name is defined on a containing expression with ungrouped subexpressions that also
- have results names
- - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
- with a results name, but has no contents defined
- - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
- defined in a grammar but has never had an expression attached to it
- - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
- but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
- - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
- incorrectly called with multiple str arguments
- - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
- calls to :class:`ParserElement.set_name`
-
- Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
- All warnings can be enabled by calling :class:`enable_all_warnings`.
- """
-
- warn_multiple_tokens_in_named_alternation = 0
- warn_ungrouped_named_tokens_in_collection = 1
- warn_name_set_on_empty_Forward = 2
- warn_on_parse_using_empty_Forward = 3
- warn_on_assignment_to_Forward = 4
- warn_on_multiple_string_args_to_oneof = 5
- warn_on_match_first_with_lshift_operator = 6
- enable_debug_on_named_expressions = 7
-
-
-def enable_diag(diag_enum: Diagnostics) -> None:
- """
- Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
- """
- __diag__.enable(diag_enum.name)
-
-
-def disable_diag(diag_enum: Diagnostics) -> None:
- """
- Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
- """
- __diag__.disable(diag_enum.name)
-
-
-def enable_all_warnings() -> None:
- """
- Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
- """
- __diag__.enable_all_warnings()
-
-
-# hide abstract class
-del __config_flags
-
-
-def _should_enable_warnings(
- cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
-) -> bool:
- enable = bool(warn_env_var)
- for warn_opt in cmd_line_warn_options:
- w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
- ":"
- )[:5]
- if not w_action.lower().startswith("i") and (
- not (w_message or w_category or w_module) or w_module == "pyparsing"
- ):
- enable = True
- elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
- enable = False
- return enable
-
-
-if _should_enable_warnings(
- sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
-):
- enable_all_warnings()
-
-
-# build list of single arg builtins, that can be used as parse actions
-_single_arg_builtins = {
- sum,
- len,
- sorted,
- reversed,
- list,
- tuple,
- set,
- any,
- all,
- min,
- max,
-}
-
-_generatorType = types.GeneratorType
-ParseAction = Union[
- Callable[[], Any],
- Callable[[ParseResults], Any],
- Callable[[int, ParseResults], Any],
- Callable[[str, int, ParseResults], Any],
-]
-ParseCondition = Union[
- Callable[[], bool],
- Callable[[ParseResults], bool],
- Callable[[int, ParseResults], bool],
- Callable[[str, int, ParseResults], bool],
-]
-ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
-DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
-DebugSuccessAction = Callable[
- [str, int, int, "ParserElement", ParseResults, bool], None
-]
-DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
-
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-identchars = pyparsing_unicode.Latin1.identchars
-identbodychars = pyparsing_unicode.Latin1.identbodychars
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-printables = "".join([c for c in string.printable if c not in string.whitespace])
-
-_trim_arity_call_line: traceback.StackSummary = None
-
-
-def _trim_arity(func, max_limit=3):
- """decorator to trim function calls to match the arity of the target"""
- global _trim_arity_call_line
-
- if func in _single_arg_builtins:
- return lambda s, l, t: func(t)
-
- limit = 0
- found_arity = False
-
- def extract_tb(tb, limit=0):
- frames = traceback.extract_tb(tb, limit=limit)
- frame_summary = frames[-1]
- return [frame_summary[:2]]
-
- # synthesize what would be returned by traceback.extract_stack at the call to
- # user's parse action 'func', so that we don't incur call penalty at parse time
-
- # fmt: off
- LINE_DIFF = 7
- # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
- # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
- _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1])
- pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
-
- def wrapper(*args):
- nonlocal found_arity, limit
- while 1:
- try:
- ret = func(*args[limit:])
- found_arity = True
- return ret
- except TypeError as te:
- # re-raise TypeErrors if they did not come from our arity testing
- if found_arity:
- raise
- else:
- tb = te.__traceback__
- trim_arity_type_error = (
- extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
- )
- del tb
-
- if trim_arity_type_error:
- if limit < max_limit:
- limit += 1
- continue
-
- raise
- # fmt: on
-
- # copy func name to wrapper for sensible debug output
- # (can't use functools.wraps, since that messes with function signature)
- func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
- wrapper.__name__ = func_name
- wrapper.__doc__ = func.__doc__
-
- return wrapper
-
-
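-# Hypothetical usage sketch (illustrative only): a one-argument parse action is
-# probed until the call arity matches, so it can be invoked as fn(s, loc, toks).
-_example_action = _trim_arity(lambda toks: toks[0])
-assert _example_action("dummy instring", 0, ["tok"]) == "tok"
-
-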
-def condition_as_parse_action(
- fn: ParseCondition, message: str = None, fatal: bool = False
-) -> ParseAction:
- """
- Function to convert a simple predicate function that returns ``True`` or ``False``
- into a parse action. Can be used in places when a parse action is required
- and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
- to an operator level in :class:`infix_notation`).
-
- Optional keyword arguments:
-
- - ``message`` - define a custom message to be used in the raised exception
- - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
- otherwise will raise :class:`ParseException`
-
- """
- msg = message if message is not None else "failed user-defined condition"
- exc_type = ParseFatalException if fatal else ParseException
- fn = _trim_arity(fn)
-
- @wraps(fn)
- def pa(s, l, t):
- if not bool(fn(s, l, t)):
- raise exc_type(s, l, msg)
-
- return pa
-
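-# Hypothetical usage sketch (commented out, since Word and nums are defined later in
-# this module):
-#
-# integer = Word(nums).add_parse_action(lambda t: int(t[0]))
-# integer.add_parse_action(
-#     condition_as_parse_action(lambda t: t[0] % 2 == 0, message="expected an even number")
-# )
-# integer.parse_string("42") # succeeds
-# integer.parse_string("43") # raises ParseException: expected an even number
-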
-
-def _default_start_debug_action(
- instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
-):
- cache_hit_str = "*" if cache_hit else ""
- print(
- (
- "{}Match {} at loc {}({},{})\n {}\n {}^".format(
- cache_hit_str,
- expr,
- loc,
- lineno(loc, instring),
- col(loc, instring),
- line(loc, instring),
- " " * (col(loc, instring) - 1),
- )
- )
- )
-
-
-def _default_success_debug_action(
- instring: str,
- startloc: int,
- endloc: int,
- expr: "ParserElement",
- toks: ParseResults,
- cache_hit: bool = False,
-):
- cache_hit_str = "*" if cache_hit else ""
- print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
-
-
-def _default_exception_debug_action(
- instring: str,
- loc: int,
- expr: "ParserElement",
- exc: Exception,
- cache_hit: bool = False,
-):
- cache_hit_str = "*" if cache_hit else ""
- print(
- "{}Match {} failed, {} raised: {}".format(
- cache_hit_str, expr, type(exc).__name__, exc
- )
- )
-
-
-def null_debug_action(*args):
- """'Do-nothing' debug action, to suppress debugging output during parsing."""
-
-
-class ParserElement(ABC):
- """Abstract base level parser element class."""
-
- DEFAULT_WHITE_CHARS: str = " \n\t\r"
- verbose_stacktrace: bool = False
- _literalStringClass: typing.Optional[type] = None
-
- @staticmethod
- def set_default_whitespace_chars(chars: str) -> None:
- r"""
- Overrides the default whitespace chars
-
- Example::
-
- # default whitespace chars are space, and newline
- Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
-
- # change to just treat newline as significant
- ParserElement.set_default_whitespace_chars(" \t")
- Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def']
- """
- ParserElement.DEFAULT_WHITE_CHARS = chars
-
- # update whitespace all parse expressions defined in this module
- for expr in _builtin_exprs:
- if expr.copyDefaultWhiteChars:
- expr.whiteChars = set(chars)
-
- @staticmethod
- def inline_literals_using(cls: type) -> None:
- """
- Set class to be used for inclusion of string literals into a parser.
-
- Example::
-
- # default literal class used is Literal
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
-
- # change to Suppress
- ParserElement.inline_literals_using(Suppress)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parse_string("1999/12/31") # -> ['1999', '12', '31']
- """
- ParserElement._literalStringClass = cls
-
- class DebugActions(NamedTuple):
- debug_try: typing.Optional[DebugStartAction]
- debug_match: typing.Optional[DebugSuccessAction]
- debug_fail: typing.Optional[DebugExceptionAction]
-
- def __init__(self, savelist: bool = False):
- self.parseAction: List[ParseAction] = list()
- self.failAction: typing.Optional[ParseFailAction] = None
- self.customName = None
- self._defaultName = None
- self.resultsName = None
- self.saveAsList = savelist
- self.skipWhitespace = True
- self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
- self.copyDefaultWhiteChars = True
- # used when checking for left-recursion
- self.mayReturnEmpty = False
- self.keepTabs = False
- self.ignoreExprs: List["ParserElement"] = list()
- self.debug = False
- self.streamlined = False
- # optimize exception handling for subclasses that don't advance parse index
- self.mayIndexError = True
- self.errmsg = ""
- # mark results names as modal (report only last) or cumulative (list all)
- self.modalResults = True
- # custom debug actions
- self.debugActions = self.DebugActions(None, None, None)
- # avoid redundant calls to preParse
- self.callPreparse = True
- self.callDuringTry = False
- self.suppress_warnings_: List[Diagnostics] = []
-
- def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement":
- """
- Suppress warnings emitted for a particular diagnostic on this expression.
-
- Example::
-
- base = pp.Forward()
- base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
-
- # statement would normally raise a warning, but is now suppressed
- print(base.parseString("x"))
-
- """
- self.suppress_warnings_.append(warning_type)
- return self
-
- def copy(self) -> "ParserElement":
- """
- Make a copy of this :class:`ParserElement`. Useful for defining
- different parse actions for the same parsing pattern, using copies of
- the original parse element.
-
- Example::
-
- integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
- integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
- integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
-
- print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))
-
- prints::
-
- [5120, 100, 655360, 268435456]
-
- Equivalent form of ``expr.copy()`` is just ``expr()``::
-
- integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
- """
- cpy = copy.copy(self)
- cpy.parseAction = self.parseAction[:]
- cpy.ignoreExprs = self.ignoreExprs[:]
- if self.copyDefaultWhiteChars:
- cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
- return cpy
-
- def set_results_name(
- self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
- ) -> "ParserElement":
- """
- Define name for referencing matching tokens as a nested attribute
- of the returned parse results.
-
- Normally, results names are assigned as you would assign keys in a dict:
- any existing value is overwritten by later values. If it is necessary to
- keep all values captured for a particular results name, call ``set_results_name``
- with ``list_all_matches`` = True.
-
- NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
- this is so that the client can define a basic element, such as an
- integer, and reference it in multiple places with different names.
-
- You can also set results names using the abbreviated syntax,
- ``expr("name")`` in place of ``expr.set_results_name("name")``
- - see :class:`__call__`. If ``list_all_matches`` is required, use
- ``expr("name*")``.
-
- Example::
-
- date_str = (integer.set_results_name("year") + '/'
- + integer.set_results_name("month") + '/'
- + integer.set_results_name("day"))
-
- # equivalent form:
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
- """
- listAllMatches = listAllMatches or list_all_matches
- return self._setResultsName(name, listAllMatches)
-
- def _setResultsName(self, name, listAllMatches=False):
- if name is None:
- return self
- newself = self.copy()
- if name.endswith("*"):
- name = name[:-1]
- listAllMatches = True
- newself.resultsName = name
- newself.modalResults = not listAllMatches
- return newself
-
- def set_break(self, break_flag: bool = True) -> "ParserElement":
- """
- Method to invoke the Python pdb debugger when this element is
- about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
- disable.
- """
- if break_flag:
- _parseMethod = self._parse
-
- def breaker(instring, loc, doActions=True, callPreParse=True):
- import pdb
-
- # this call to pdb.set_trace() is intentional, not a checkin error
- pdb.set_trace()
- return _parseMethod(instring, loc, doActions, callPreParse)
-
- breaker._originalParseMethod = _parseMethod
- self._parse = breaker
- else:
- if hasattr(self._parse, "_originalParseMethod"):
- self._parse = self._parse._originalParseMethod
- return self
-
- def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
- """
- Define one or more actions to perform when successfully matching parse element definition.
-
- Parse actions can be called to perform data conversions, do extra validation,
- update external data structures, or enhance or replace the parsed tokens.
- Each parse action ``fn`` is a callable method with 0-3 arguments, called as
- ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
-
- - s = the original string being parsed (see note below)
- - loc = the location of the matching substring
- - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
-
- The parsed tokens are passed to the parse action as ParseResults. They can be
- modified in place using list-style append, extend, and pop operations to update
- the parsed list elements; and with dictionary-style item set and del operations
- to add, update, or remove any named results. If the tokens are modified in place,
- it is not necessary to return them with a return statement.
-
- Parse actions can also completely replace the given tokens, with another ``ParseResults``
- object, or with some entirely different object (common for parse actions that perform data
- conversions). A convenient way to build a new parse result is to define the values
- using a dict, and then create the return value using :class:`ParseResults.from_dict`.
-
- If None is passed as the ``fn`` parse action, all previously added parse actions for this
- expression are cleared.
-
- Optional keyword arguments:
-
- - call_during_try = (default= ``False``) indicate if parse action should be run during
- lookaheads and alternate testing. For parse actions that have side effects, it is
- important to only call the parse action once it is determined that it is being
- called as part of a successful parse. For parse actions that perform additional
-          validation, call_during_try should be passed as True, so that the validation
- code is included in the preliminary "try" parses.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See :class:`parse_string` for more
-        information on parsing strings containing ``<TAB>`` s, and suggested
- methods to maintain a consistent view of the parsed string, the parse
- location, and line and column positions within the parsed string.
-
- Example::
-
- # parse dates in the form YYYY/MM/DD
-
- # use parse action to convert toks from str to int at parse time
- def convert_to_int(toks):
- return int(toks[0])
-
- # use a parse action to verify that the date is a valid date
- def is_valid_date(instring, loc, toks):
- from datetime import date
- year, month, day = toks[::2]
- try:
- date(year, month, day)
- except ValueError:
- raise ParseException(instring, loc, "invalid date given")
-
- integer = Word(nums)
- date_str = integer + '/' + integer + '/' + integer
-
- # add parse actions
- integer.set_parse_action(convert_to_int)
- date_str.set_parse_action(is_valid_date)
-
- # note that integer fields are now ints, not strings
- date_str.run_tests('''
- # successful parse - note that integer fields were converted to ints
- 1999/12/31
-
- # fail - invalid date
- 1999/13/31
- ''')
- """
- if list(fns) == [None]:
- self.parseAction = []
- else:
- if not all(callable(fn) for fn in fns):
- raise TypeError("parse actions must be callable")
- self.parseAction = [_trim_arity(fn) for fn in fns]
- self.callDuringTry = kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
-
- def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
- """
- Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
-
- See examples in :class:`copy`.
- """
- self.parseAction += [_trim_arity(fn) for fn in fns]
- self.callDuringTry = self.callDuringTry or kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
-
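-    # A small sketch (illustrative only) contrasting ``set_parse_action``, which
-    # replaces any previously set actions, with ``add_parse_action``, which
-    # appends to them; both actions below run in order:
-    #
-    #   integer = Word(nums).set_parse_action(lambda t: int(t[0]))
-    #   integer.add_parse_action(lambda t: t[0] * 2)
-    #   integer.parse_string("21")   # -> [42]
-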
- def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
- """Add a boolean predicate function to expression's list of parse actions. See
- :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
- functions passed to ``add_condition`` need to return boolean success/fail of the condition.
-
- Optional keyword arguments:
-
- - message = define a custom message to be used in the raised exception
- - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
- ParseException
- - call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
- default=False
-
- Example::
-
- integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
- year_int = integer.copy()
- year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
- date_str = year_int + '/' + integer + '/' + integer
-
- result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0),
- (line:1, col:1)
- """
- for fn in fns:
- self.parseAction.append(
- condition_as_parse_action(
- fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
- )
- )
-
- self.callDuringTry = self.callDuringTry or kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
-
- def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
- """
- Define action to perform if parsing fails at this expression.
-        Fail action fn is a callable function that takes the arguments
- ``fn(s, loc, expr, err)`` where:
-
- - s = string being parsed
- - loc = location where expression match was attempted and failed
- - expr = the parse expression that failed
- - err = the exception thrown
-
- The function returns no value. It may throw :class:`ParseFatalException`
- if it is desired to stop parsing immediately."""
- self.failAction = fn
- return self
-
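-    # A minimal fail-action sketch; the logging function below is illustrative,
-    # not part of the library:
-    #
-    #   def log_failure(s, loc, expr, err):
-    #       print("failed to match {} at loc {}: {}".format(expr, loc, err))
-    #
-    #   Word(nums).set_fail_action(log_failure).parse_string("abc")
-    #   # prints the message, then the ParseException still propagates
-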
- def _skipIgnorables(self, instring, loc):
- exprsFound = True
- while exprsFound:
- exprsFound = False
- for e in self.ignoreExprs:
- try:
- while 1:
- loc, dummy = e._parse(instring, loc)
- exprsFound = True
- except ParseException:
- pass
- return loc
-
- def preParse(self, instring, loc):
- if self.ignoreExprs:
- loc = self._skipIgnorables(instring, loc)
-
- if self.skipWhitespace:
- instrlen = len(instring)
- white_chars = self.whiteChars
- while loc < instrlen and instring[loc] in white_chars:
- loc += 1
-
- return loc
-
- def parseImpl(self, instring, loc, doActions=True):
- return loc, []
-
- def postParse(self, instring, loc, tokenlist):
- return tokenlist
-
- # @profile
- def _parseNoCache(
- self, instring, loc, doActions=True, callPreParse=True
- ) -> Tuple[int, ParseResults]:
- debugging = self.debug # and doActions)
- len_instring = len(instring)
-
- if debugging or self.failAction:
- # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
- try:
- if callPreParse and self.callPreparse:
- pre_loc = self.preParse(instring, loc)
- else:
- pre_loc = loc
- tokens_start = pre_loc
- if self.debugActions.debug_try:
- self.debugActions.debug_try(instring, tokens_start, self, False)
- if self.mayIndexError or pre_loc >= len_instring:
- try:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
- except IndexError:
- raise ParseException(instring, len_instring, self.errmsg, self)
- else:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
- except Exception as err:
- # print("Exception raised:", err)
- if self.debugActions.debug_fail:
- self.debugActions.debug_fail(
- instring, tokens_start, self, err, False
- )
- if self.failAction:
- self.failAction(instring, tokens_start, self, err)
- raise
- else:
- if callPreParse and self.callPreparse:
- pre_loc = self.preParse(instring, loc)
- else:
- pre_loc = loc
- tokens_start = pre_loc
- if self.mayIndexError or pre_loc >= len_instring:
- try:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
- except IndexError:
- raise ParseException(instring, len_instring, self.errmsg, self)
- else:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
-
- tokens = self.postParse(instring, loc, tokens)
-
- ret_tokens = ParseResults(
- tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
- )
- if self.parseAction and (doActions or self.callDuringTry):
- if debugging:
- try:
- for fn in self.parseAction:
- try:
- tokens = fn(instring, tokens_start, ret_tokens)
- except IndexError as parse_action_exc:
- exc = ParseException("exception raised in parse action")
- raise exc from parse_action_exc
-
- if tokens is not None and tokens is not ret_tokens:
- ret_tokens = ParseResults(
- tokens,
- self.resultsName,
- asList=self.saveAsList
- and isinstance(tokens, (ParseResults, list)),
- modal=self.modalResults,
- )
- except Exception as err:
- # print "Exception raised in user parse action:", err
- if self.debugActions.debug_fail:
- self.debugActions.debug_fail(
- instring, tokens_start, self, err, False
- )
- raise
- else:
- for fn in self.parseAction:
- try:
- tokens = fn(instring, tokens_start, ret_tokens)
- except IndexError as parse_action_exc:
- exc = ParseException("exception raised in parse action")
- raise exc from parse_action_exc
-
- if tokens is not None and tokens is not ret_tokens:
- ret_tokens = ParseResults(
- tokens,
- self.resultsName,
- asList=self.saveAsList
- and isinstance(tokens, (ParseResults, list)),
- modal=self.modalResults,
- )
- if debugging:
- # print("Matched", self, "->", ret_tokens.as_list())
- if self.debugActions.debug_match:
- self.debugActions.debug_match(
- instring, tokens_start, loc, self, ret_tokens, False
- )
-
- return loc, ret_tokens
-
- def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
- try:
- return self._parse(instring, loc, doActions=False)[0]
- except ParseFatalException:
- if raise_fatal:
- raise
- raise ParseException(instring, loc, self.errmsg, self)
-
- def can_parse_next(self, instring: str, loc: int) -> bool:
- try:
- self.try_parse(instring, loc)
- except (ParseException, IndexError):
- return False
- else:
- return True
-
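-    # A quick sketch (illustrative only) of non-consuming lookahead checks using
-    # ``can_parse_next``:
-    #
-    #   word = Word(alphas)
-    #   word.can_parse_next("abc 123", 0)   # -> True
-    #   word.can_parse_next("abc 123", 4)   # -> False ('1' is not alphabetic)
-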
- # cache for left-recursion in Forward references
- recursion_lock = RLock()
- recursion_memos: typing.Dict[
- Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
- ] = {}
-
- # argument cache for optimizing repeated calls when backtracking through recursive expressions
- packrat_cache = (
- {}
-    )  # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail
- packrat_cache_lock = RLock()
- packrat_cache_stats = [0, 0]
-
- # this method gets repeatedly called during backtracking with the same arguments -
- # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
- def _parseCache(
- self, instring, loc, doActions=True, callPreParse=True
- ) -> Tuple[int, ParseResults]:
- HIT, MISS = 0, 1
- lookup = (self, instring, loc, callPreParse, doActions)
- with ParserElement.packrat_cache_lock:
- cache = ParserElement.packrat_cache
- value = cache.get(lookup)
- if value is cache.not_in_cache:
- ParserElement.packrat_cache_stats[MISS] += 1
- try:
- value = self._parseNoCache(instring, loc, doActions, callPreParse)
- except ParseBaseException as pe:
- # cache a copy of the exception, without the traceback
- cache.set(lookup, pe.__class__(*pe.args))
- raise
- else:
- cache.set(lookup, (value[0], value[1].copy(), loc))
- return value
- else:
- ParserElement.packrat_cache_stats[HIT] += 1
- if self.debug and self.debugActions.debug_try:
- try:
- self.debugActions.debug_try(instring, loc, self, cache_hit=True)
- except TypeError:
- pass
- if isinstance(value, Exception):
- if self.debug and self.debugActions.debug_fail:
- try:
- self.debugActions.debug_fail(
- instring, loc, self, value, cache_hit=True
- )
- except TypeError:
- pass
- raise value
-
- loc_, result, endloc = value[0], value[1].copy(), value[2]
- if self.debug and self.debugActions.debug_match:
- try:
- self.debugActions.debug_match(
- instring, loc_, endloc, self, result, cache_hit=True
- )
- except TypeError:
- pass
-
- return loc_, result
-
- _parse = _parseNoCache
-
- @staticmethod
- def reset_cache() -> None:
- ParserElement.packrat_cache.clear()
- ParserElement.packrat_cache_stats[:] = [0] * len(
- ParserElement.packrat_cache_stats
- )
- ParserElement.recursion_memos.clear()
-
- _packratEnabled = False
- _left_recursion_enabled = False
-
- @staticmethod
- def disable_memoization() -> None:
- """
- Disables active Packrat or Left Recursion parsing and their memoization
-
- This method also works if neither Packrat nor Left Recursion are enabled.
-        This makes it safe to call before activating Packrat or Left Recursion
- to clear any previous settings.
- """
- ParserElement.reset_cache()
- ParserElement._left_recursion_enabled = False
- ParserElement._packratEnabled = False
- ParserElement._parse = ParserElement._parseNoCache
-
- @staticmethod
- def enable_left_recursion(
- cache_size_limit: typing.Optional[int] = None, *, force=False
- ) -> None:
- """
- Enables "bounded recursion" parsing, which allows for both direct and indirect
- left-recursion. During parsing, left-recursive :class:`Forward` elements are
- repeatedly matched with a fixed recursion depth that is gradually increased
- until finding the longest match.
-
- Example::
-
- import pyparsing as pp
- pp.ParserElement.enable_left_recursion()
-
- E = pp.Forward("E")
- num = pp.Word(pp.nums)
- # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
- E <<= E + '+' - num | num
-
- print(E.parse_string("1+2+3"))
-
- Recursion search naturally memoizes matches of ``Forward`` elements and may
- thus skip reevaluation of parse actions during backtracking. This may break
- programs with parse actions which rely on strict ordering of side-effects.
-
- Parameters:
-
- - cache_size_limit - (default=``None``) - memoize at most this many
- ``Forward`` elements during matching; if ``None`` (the default),
- memoize all ``Forward`` elements.
-
-        Bounded Recursion parsing works similarly to, but not identically to, Packrat parsing,
- thus the two cannot be used together. Use ``force=True`` to disable any
- previous, conflicting settings.
- """
- if force:
- ParserElement.disable_memoization()
- elif ParserElement._packratEnabled:
- raise RuntimeError("Packrat and Bounded Recursion are not compatible")
- if cache_size_limit is None:
- ParserElement.recursion_memos = _UnboundedMemo()
- elif cache_size_limit > 0:
- ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
- else:
- raise NotImplementedError("Memo size of %s" % cache_size_limit)
- ParserElement._left_recursion_enabled = True
-
- @staticmethod
- def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
- """
- Enables "packrat" parsing, which adds memoizing to the parsing logic.
- Repeated parse attempts at the same string location (which happens
- often in many complex grammars) can immediately return a cached value,
-        instead of re-executing parsing/validating code.  Memoizing is done for
- both valid results and parsing exceptions.
-
- Parameters:
-
- - cache_size_limit - (default= ``128``) - if an integer value is provided
- will limit the size of the packrat cache; if None is passed, then
- the cache size will be unbounded; if 0 is passed, the cache will
- be effectively disabled.
-
- This speedup may break existing programs that use parse actions that
- have side-effects. For this reason, packrat parsing is disabled when
- you first import pyparsing. To activate the packrat feature, your
- program must call the class method :class:`ParserElement.enable_packrat`.
- For best results, call ``enable_packrat()`` immediately after
- importing pyparsing.
-
- Example::
-
- import pyparsing
- pyparsing.ParserElement.enable_packrat()
-
-        Packrat parsing works similarly to, but not identically to, Bounded Recursion parsing,
- thus the two cannot be used together. Use ``force=True`` to disable any
- previous, conflicting settings.
- """
- if force:
- ParserElement.disable_memoization()
- elif ParserElement._left_recursion_enabled:
- raise RuntimeError("Packrat and Bounded Recursion are not compatible")
- if not ParserElement._packratEnabled:
- ParserElement._packratEnabled = True
- if cache_size_limit is None:
- ParserElement.packrat_cache = _UnboundedCache()
- else:
- ParserElement.packrat_cache = _FifoCache(cache_size_limit)
- ParserElement._parse = ParserElement._parseCache
-
- def parse_string(
- self, instring: str, parse_all: bool = False, *, parseAll: bool = False
- ) -> ParseResults:
- """
- Parse a string with respect to the parser definition. This function is intended as the primary interface to the
- client code.
-
- :param instring: The input string to be parsed.
- :param parse_all: If set, the entire input string must match the grammar.
- :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
- :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
- :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
- an object with attributes if the given parser includes results names.
-
-        If the input string is required to match the entire grammar, the ``parse_all`` flag must be set to ``True``. This
- is also equivalent to ending the grammar with :class:`StringEnd`().
-
- To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
- converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string
- contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
- being parsed, one can ensure a consistent view of the input string by doing one of the following:
-
- - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
- - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the
- parse action's ``s`` argument, or
- - explicitly expand the tabs in your input string before calling ``parse_string``.
-
- Examples:
-
- By default, partial matches are OK.
-
- >>> res = Word('a').parse_string('aaaaabaaa')
- >>> print(res)
- ['aaaaa']
-
- The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children
- directly to see more examples.
-
-        It raises an exception if the ``parse_all`` flag is set and ``instring`` does not match the whole grammar.
-
- >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
- Traceback (most recent call last):
- ...
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
- """
- parseAll = parse_all or parseAll
-
- ParserElement.reset_cache()
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
- if not self.keepTabs:
- instring = instring.expandtabs()
- try:
- loc, tokens = self._parse(instring, 0)
- if parseAll:
- loc = self.preParse(instring, loc)
- se = Empty() + StringEnd()
- se._parse(instring, loc)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clearing out pyparsing internal stack trace
- raise exc.with_traceback(None)
- else:
- return tokens
-
- def scan_string(
- self,
- instring: str,
- max_matches: int = _MAX_INT,
- overlap: bool = False,
- *,
- debug: bool = False,
- maxMatches: int = _MAX_INT,
- ) -> Generator[Tuple[ParseResults, int, int], None, None]:
- """
- Scan the input string for expression matches. Each match will return the
- matching tokens, start location, and end location. May be called with optional
- ``max_matches`` argument, to clip scanning after 'n' matches are found. If
- ``overlap`` is specified, then overlapping matches will be reported.
-
- Note that the start and end locations are reported relative to the string
- being parsed. See :class:`parse_string` for more information on parsing
- strings with embedded tabs.
-
- Example::
-
- source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
- print(source)
- for tokens, start, end in Word(alphas).scan_string(source):
- print(' '*start + '^'*(end-start))
- print(' '*start + tokens[0])
-
- prints::
-
- sldjf123lsdjjkf345sldkjf879lkjsfd987
- ^^^^^
- sldjf
- ^^^^^^^
- lsdjjkf
- ^^^^^^
- sldkjf
- ^^^^^^
- lkjsfd
- """
- maxMatches = min(maxMatches, max_matches)
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
-
- if not self.keepTabs:
- instring = str(instring).expandtabs()
- instrlen = len(instring)
- loc = 0
- preparseFn = self.preParse
- parseFn = self._parse
-        ParserElement.reset_cache()
- matches = 0
- try:
- while loc <= instrlen and matches < maxMatches:
- try:
- preloc = preparseFn(instring, loc)
- nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
- except ParseException:
- loc = preloc + 1
- else:
- if nextLoc > loc:
- matches += 1
- if debug:
- print(
- {
- "tokens": tokens.asList(),
- "start": preloc,
- "end": nextLoc,
- }
- )
- yield tokens, preloc, nextLoc
- if overlap:
- nextloc = preparseFn(instring, loc)
- if nextloc > loc:
- loc = nextLoc
- else:
- loc += 1
- else:
- loc = nextLoc
- else:
- loc = preloc + 1
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
- def transform_string(self, instring: str, *, debug: bool = False) -> str:
- """
- Extension to :class:`scan_string`, to modify matching text with modified tokens that may
- be returned from a parse action. To use ``transform_string``, define a grammar and
- attach a parse action to it that modifies the returned token list.
- Invoking ``transform_string()`` on a target string will then scan for matches,
- and replace the matched text patterns according to the logic in the parse
- action. ``transform_string()`` returns the resulting transformed string.
-
- Example::
-
- wd = Word(alphas)
- wd.set_parse_action(lambda toks: toks[0].title())
-
- print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
-
- prints::
-
- Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
- """
- out: List[str] = []
- lastE = 0
-        # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
- # keep string locs straight between transform_string and scan_string
- self.keepTabs = True
- try:
- for t, s, e in self.scan_string(instring, debug=debug):
- out.append(instring[lastE:s])
- if t:
- if isinstance(t, ParseResults):
- out += t.as_list()
- elif isinstance(t, Iterable) and not isinstance(t, str_type):
- out.extend(t)
- else:
- out.append(t)
- lastE = e
- out.append(instring[lastE:])
- out = [o for o in out if o]
- return "".join([str(s) for s in _flatten(out)])
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
- def search_string(
- self,
- instring: str,
- max_matches: int = _MAX_INT,
- *,
- debug: bool = False,
- maxMatches: int = _MAX_INT,
- ) -> ParseResults:
- """
- Another extension to :class:`scan_string`, simplifying the access to the tokens found
- to match the given parse expression. May be called with optional
- ``max_matches`` argument, to clip searching after 'n' matches are found.
-
- Example::
-
- # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
- cap_word = Word(alphas.upper(), alphas.lower())
-
- print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
-
- # the sum() builtin can be used to merge results into a single ParseResults object
- print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
-
- prints::
-
- [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
- ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
- """
- maxMatches = min(maxMatches, max_matches)
- try:
- return ParseResults(
- [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
- )
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
- def split(
- self,
- instring: str,
- maxsplit: int = _MAX_INT,
- include_separators: bool = False,
- *,
- includeSeparators=False,
- ) -> Generator[str, None, None]:
- """
- Generator method to split a string using the given expression as a separator.
- May be called with optional ``maxsplit`` argument, to limit the number of splits;
-        and the optional ``include_separators`` argument (default= ``False``), to indicate whether
-        the separating matched text should be included in the split results.
-
- Example::
-
- punc = one_of(list(".,;:/-!?"))
- print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-
- prints::
-
- ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
- """
- includeSeparators = includeSeparators or include_separators
- last = 0
- for t, s, e in self.scan_string(instring, max_matches=maxsplit):
- yield instring[last:s]
- if includeSeparators:
- yield t[0]
- last = e
- yield instring[last:]
-
- def __add__(self, other) -> "ParserElement":
- """
- Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
- converts them to :class:`Literal`s by default.
-
- Example::
-
- greet = Word(alphas) + "," + Word(alphas) + "!"
- hello = "Hello, World!"
- print(hello, "->", greet.parse_string(hello))
-
- prints::
-
- Hello, World! -> ['Hello', ',', 'World', '!']
-
- ``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
-
- Literal('start') + ... + Literal('end')
-
- is equivalent to:
-
- Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
-
- Note that the skipped text is returned with '_skipped' as a results name,
- and to support having multiple skips in the same parser, the value returned is
- a list of all skipped text.
- """
- if other is Ellipsis:
- return _PendingSkip(self)
-
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return And([self, other])
-
- def __radd__(self, other) -> "ParserElement":
- """
- Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
- """
- if other is Ellipsis:
- return SkipTo(self)("_skipped*") + self
-
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other + self
-
- def __sub__(self, other) -> "ParserElement":
- """
- Implementation of ``-`` operator, returns :class:`And` with error stop
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return self + And._ErrorStop() + other
-
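-    # A short sketch (illustrative only): past the ``-`` operator's error stop,
-    # a failure raises a fatal ParseSyntaxException instead of backtracking:
-    #
-    #   stmt = Keyword("if") - "(" + Word(alphas) + ")"
-    #   stmt.parse_string("if x)")   # raises ParseSyntaxException at the missing '('
-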
- def __rsub__(self, other) -> "ParserElement":
- """
- Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other - self
-
- def __mul__(self, other) -> "ParserElement":
- """
- Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
- ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
- tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
- may also include ``None`` as in:
- - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
- to ``expr*n + ZeroOrMore(expr)``
- (read as "at least n instances of ``expr``")
- - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
- (read as "0 to n instances of ``expr``")
- - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
-
- Note that ``expr*(None, n)`` does not raise an exception if
- more than n exprs exist in the input stream; that is,
- ``expr*(None, n)`` does not enforce a maximum number of expr
- occurrences. If this behavior is desired, then write
- ``expr*(None, n) + ~expr``
- """
- if other is Ellipsis:
- other = (0, None)
- elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
- other = ((0,) + other[1:] + (None,))[:2]
-
- if isinstance(other, int):
- minElements, optElements = other, 0
- elif isinstance(other, tuple):
- other = tuple(o if o is not Ellipsis else None for o in other)
- other = (other + (None, None))[:2]
- if other[0] is None:
- other = (0, other[1])
- if isinstance(other[0], int) and other[1] is None:
- if other[0] == 0:
- return ZeroOrMore(self)
- if other[0] == 1:
- return OneOrMore(self)
- else:
- return self * other[0] + ZeroOrMore(self)
- elif isinstance(other[0], int) and isinstance(other[1], int):
- minElements, optElements = other
- optElements -= minElements
- else:
- raise TypeError(
- "cannot multiply ParserElement and ({}) objects".format(
- ",".join(type(item).__name__ for item in other)
- )
- )
- else:
- raise TypeError(
- "cannot multiply ParserElement and {} objects".format(
- type(other).__name__
- )
- )
-
- if minElements < 0:
- raise ValueError("cannot multiply ParserElement by negative value")
- if optElements < 0:
- raise ValueError(
- "second tuple value must be greater or equal to first tuple value"
- )
- if minElements == optElements == 0:
- return And([])
-
- if optElements:
-
- def makeOptionalList(n):
- if n > 1:
- return Opt(self + makeOptionalList(n - 1))
- else:
- return Opt(self)
-
- if minElements:
- if minElements == 1:
- ret = self + makeOptionalList(optElements)
- else:
- ret = And([self] * minElements) + makeOptionalList(optElements)
- else:
- ret = makeOptionalList(optElements)
- else:
- if minElements == 1:
- ret = self
- else:
- ret = And([self] * minElements)
- return ret
-
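-    # A few concrete multiplications, as a quick sketch (illustrative only):
-    #
-    #   ab = Literal("ab")
-    #   (ab * 2).parse_string("abab")            # -> ['ab', 'ab']
-    #   (ab * (1, 3)).parse_string("ababab")     # -> ['ab', 'ab', 'ab']
-    #   (ab * (2, None)).parse_string("ababab")  # -> ['ab', 'ab', 'ab'] (2 or more)
-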
- def __rmul__(self, other) -> "ParserElement":
- return self.__mul__(other)
-
- def __or__(self, other) -> "ParserElement":
- """
- Implementation of ``|`` operator - returns :class:`MatchFirst`
- """
- if other is Ellipsis:
- return _PendingSkip(self, must_skip=True)
-
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return MatchFirst([self, other])
-
- def __ror__(self, other) -> "ParserElement":
- """
- Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other | self
-
- def __xor__(self, other) -> "ParserElement":
- """
- Implementation of ``^`` operator - returns :class:`Or`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return Or([self, other])
-
- def __rxor__(self, other) -> "ParserElement":
- """
- Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other ^ self
-
- def __and__(self, other) -> "ParserElement":
- """
- Implementation of ``&`` operator - returns :class:`Each`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return Each([self, other])
-
- def __rand__(self, other) -> "ParserElement":
- """
- Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other & self
-
- def __invert__(self) -> "ParserElement":
- """
- Implementation of ``~`` operator - returns :class:`NotAny`
- """
- return NotAny(self)
-
- # disable __iter__ to override legacy use of sequential access to __getitem__ to
- # iterate over a sequence
- __iter__ = None
-
- def __getitem__(self, key):
- """
- use ``[]`` indexing notation as a short form for expression repetition:
-
- - ``expr[n]`` is equivalent to ``expr*n``
- - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
- to ``expr*n + ZeroOrMore(expr)``
- (read as "at least n instances of ``expr``")
- - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
- (read as "0 to n instances of ``expr``")
- - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
-
- ``None`` may be used in place of ``...``.
-
-        Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
- if more than ``n`` ``expr``s exist in the input stream. If this behavior is
- desired, then write ``expr[..., n] + ~expr``.
- """
-
- # convert single arg keys to tuples
- try:
- if isinstance(key, str_type):
- key = (key,)
- iter(key)
- except TypeError:
- key = (key, key)
-
- if len(key) > 2:
- raise TypeError(
- "only 1 or 2 index arguments supported ({}{})".format(
- key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
- )
- )
-
- # clip to 2 elements
- ret = self * tuple(key[:2])
- return ret
-
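-    # A quick sketch (illustrative only) of the ``[]`` repetition notation:
-    #
-    #   num = Word(nums)
-    #   num[2].parse_string("1 2")         # exactly two  -> ['1', '2']
-    #   num[1, ...].parse_string("1 2 3")  # one or more  -> ['1', '2', '3']
-    #   num[..., 2].parse_string("1")      # zero to two  -> ['1']
-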
- def __call__(self, name: str = None) -> "ParserElement":
- """
- Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
-
- If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
- passed as ``True``.
-
-        If ``name`` is omitted, same as calling :class:`copy`.
-
- Example::
-
- # these are equivalent
- userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
- userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
- """
- if name is not None:
- return self._setResultsName(name)
- else:
- return self.copy()
-
- def suppress(self) -> "ParserElement":
- """
- Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
- cluttering up returned output.
- """
- return Suppress(self)
-
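-    # A short sketch (illustrative only): suppressing separators keeps them out
-    # of the returned tokens:
-    #
-    #   comma = Literal(",").suppress()
-    #   row = Word(alphas) + (comma + Word(alphas))[...]
-    #   row.parse_string("a,b,c")   # -> ['a', 'b', 'c']
-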
- def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
- """
- Enables the skipping of whitespace before matching the characters in the
- :class:`ParserElement`'s defined pattern.
-
- :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
- """
- self.skipWhitespace = True
- return self
-
- def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
- """
- Disables the skipping of whitespace before matching the characters in the
- :class:`ParserElement`'s defined pattern. This is normally only used internally by
- the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-
- :param recursive: If true (the default), also disable whitespace skipping in child elements (if any)
- """
- self.skipWhitespace = False
- return self
-
- def set_whitespace_chars(
- self, chars: Union[Set[str], str], copy_defaults: bool = False
- ) -> "ParserElement":
- """
- Overrides the default whitespace chars
- """
- self.skipWhitespace = True
- self.whiteChars = set(chars)
- self.copyDefaultWhiteChars = copy_defaults
- return self
-
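-    # A sketch (illustrative only) of restricting skipped whitespace to spaces
-    # and tabs, so that newlines become significant:
-    #
-    #   wd = Word(alphas).set_whitespace_chars(" \t")
-    #   (wd + wd).parse_string("hello world")    # -> ['hello', 'world']
-    #   (wd + wd).parse_string("hello\nworld")   # raises ParseException
-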
- def parse_with_tabs(self) -> "ParserElement":
- """
-        Overrides default behavior to expand ``<TAB>`` s to spaces before parsing the input string.
-        Must be called before ``parse_string`` when the input grammar contains elements that
-        match ``<TAB>`` characters.
- """
- self.keepTabs = True
- return self
-
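-    # A sketch (illustrative only) of a grammar that matches literal tabs; the
-    # tab Literal opts out of whitespace skipping so the tab is not consumed
-    # before it can be matched:
-    #
-    #   TAB = Literal("\t").leave_whitespace().suppress()
-    #   pair = Word(alphas) + TAB + Word(alphas)
-    #   pair.parse_with_tabs().parse_string("key\tvalue")   # -> ['key', 'value']
-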
- def ignore(self, other: "ParserElement") -> "ParserElement":
- """
- Define expression to be ignored (e.g., comments) while doing pattern
- matching; may be called repeatedly, to define multiple comment or other
- ignorable patterns.
-
- Example::
-
- patt = Word(alphas)[1, ...]
- patt.parse_string('ablaj /* comment */ lskjd')
- # -> ['ablaj']
-
- patt.ignore(c_style_comment)
- patt.parse_string('ablaj /* comment */ lskjd')
- # -> ['ablaj', 'lskjd']
- """
- if isinstance(other, str_type):
- other = Suppress(other)
-
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- self.ignoreExprs.append(other)
- else:
- self.ignoreExprs.append(Suppress(other.copy()))
- return self
-
- def set_debug_actions(
- self,
- start_action: DebugStartAction,
- success_action: DebugSuccessAction,
- exception_action: DebugExceptionAction,
- ) -> "ParserElement":
- """
- Customize display of debugging messages while doing pattern matching:
-
- - ``start_action`` - method to be called when an expression is about to be parsed;
- should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
-
- - ``success_action`` - method to be called when an expression has successfully parsed;
-          should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
-
- - ``exception_action`` - method to be called when expression fails to parse;
- should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
- """
- self.debugActions = self.DebugActions(
- start_action or _default_start_debug_action,
- success_action or _default_success_debug_action,
- exception_action or _default_exception_debug_action,
- )
- self.debug = True
- return self
-
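-    # A minimal sketch (illustrative only) of a custom "try" debug action;
-    # passing None for the other two actions falls back to the defaults:
-    #
-    #   def show_try(input_string, location, expression, cache_hit):
-    #       print("trying {} at loc {}".format(expression, location))
-    #
-    #   wd = Word(alphas).set_debug_actions(show_try, None, None)
-    #   wd.parse_string("abc")
-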
- def set_debug(self, flag: bool = True) -> "ParserElement":
- """
- Enable display of debugging messages while doing pattern matching.
- Set ``flag`` to ``True`` to enable, ``False`` to disable.
-
- Example::
-
- wd = Word(alphas).set_name("alphaword")
- integer = Word(nums).set_name("numword")
- term = wd | integer
-
- # turn on debugging for wd
- wd.set_debug()
-
- term[1, ...].parse_string("abc 123 xyz 890")
-
- prints::
-
- Match alphaword at loc 0(1,1)
- Matched alphaword -> ['abc']
- Match alphaword at loc 3(1,4)
- Exception raised:Expected alphaword (at char 4), (line:1, col:5)
- Match alphaword at loc 7(1,8)
- Matched alphaword -> ['xyz']
- Match alphaword at loc 11(1,12)
- Exception raised:Expected alphaword (at char 12), (line:1, col:13)
- Match alphaword at loc 15(1,16)
- Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
- The output shown is that produced by the default debug actions - custom debug actions can be
-        specified using :class:`set_debug_actions`. Prior to attempting
-        to match the ``wd`` expression, the debugging message
-        ``"Match <exprname> at loc <n>(<line>,<col>)"`` is shown.
-        Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
- message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
- which makes debugging and exception messages easier to understand - for instance, the default
- name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
- """
- if flag:
- self.set_debug_actions(
- _default_start_debug_action,
- _default_success_debug_action,
- _default_exception_debug_action,
- )
- else:
- self.debug = False
- return self
-
- @property
- def default_name(self) -> str:
- if self._defaultName is None:
- self._defaultName = self._generateDefaultName()
- return self._defaultName
-
- @abstractmethod
- def _generateDefaultName(self):
- """
- Child classes must define this method, which defines how the ``default_name`` is set.
- """
-
- def set_name(self, name: str) -> "ParserElement":
- """
- Define name for this expression, makes debugging and exception messages clearer.
- Example::
- Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
- Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
- """
- self.customName = name
- self.errmsg = "Expected " + self.name
- if __diag__.enable_debug_on_named_expressions:
- self.set_debug()
- return self
-
- @property
- def name(self) -> str:
- # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
- return self.customName if self.customName is not None else self.default_name
-
- def __str__(self) -> str:
- return self.name
-
- def __repr__(self) -> str:
- return str(self)
-
- def streamline(self) -> "ParserElement":
- self.streamlined = True
- self._defaultName = None
- return self
-
- def recurse(self) -> Sequence["ParserElement"]:
- return []
-
- def _checkRecursion(self, parseElementList):
- subRecCheckList = parseElementList[:] + [self]
- for e in self.recurse():
- e._checkRecursion(subRecCheckList)
-
- def validate(self, validateTrace=None) -> None:
- """
- Check defined expressions for valid structure, check for infinite recursive definitions.
- """
- self._checkRecursion([])
-
- def parse_file(
- self,
- file_or_filename: Union[str, Path, TextIO],
- encoding: str = "utf-8",
- parse_all: bool = False,
- *,
- parseAll: bool = False,
- ) -> ParseResults:
- """
- Execute the parse expression on the given file or filename.
- If a filename is specified (instead of a file object),
- the entire file is opened, read, and closed before parsing.
- """
- parseAll = parseAll or parse_all
- try:
- file_contents = file_or_filename.read()
- except AttributeError:
- with open(file_or_filename, "r", encoding=encoding) as f:
- file_contents = f.read()
- try:
- return self.parse_string(file_contents, parseAll)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
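-    # A quick sketch (illustrative only; the file name is hypothetical):
-    #
-    #   grammar = Word(alphas)[1, ...]
-    #   results = grammar.parse_file("words.txt", parse_all=True)
-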
- def __eq__(self, other):
- if self is other:
- return True
- elif isinstance(other, str_type):
- return self.matches(other, parse_all=True)
- elif isinstance(other, ParserElement):
- return vars(self) == vars(other)
- return False
-
- def __hash__(self):
- return id(self)
-
- def matches(
- self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
- ) -> bool:
- """
- Method for quick testing of a parser against a test string. Good for simple
-        inline microtests of sub expressions while building up a larger parser.
-
- Parameters:
- - ``test_string`` - to test against this expression for a match
- - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
-
- Example::
-
- expr = Word(nums)
- assert expr.matches("100")
- """
- parseAll = parseAll and parse_all
- try:
- self.parse_string(str(test_string), parse_all=parseAll)
- return True
- except ParseBaseException:
- return False
-
- def run_tests(
- self,
- tests: Union[str, List[str]],
- parse_all: bool = True,
- comment: typing.Optional[Union["ParserElement", str]] = "#",
- full_dump: bool = True,
- print_results: bool = True,
- failure_tests: bool = False,
- post_parse: Callable[[str, ParseResults], str] = None,
- file: typing.Optional[TextIO] = None,
- with_line_numbers: bool = False,
- *,
- parseAll: bool = True,
- fullDump: bool = True,
- printResults: bool = True,
- failureTests: bool = False,
- postParse: Callable[[str, ParseResults], str] = None,
- ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]:
- """
- Execute the parse expression on a series of test strings, showing each
- test, the parsed results or where the parse failed. Quick and easy way to
- run a parse expression against a list of sample strings.
-
- Parameters:
- - ``tests`` - a list of separate test strings, or a multiline string of test strings
- - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
- - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
- string; pass None to disable comment filtering
- - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
- if False, only dump nested list
- - ``print_results`` - (default= ``True``) prints test output to stdout
- - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
- - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
- `fn(test_string, parse_results)` and returns a string to be added to the test output
- - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
- if None, will default to ``sys.stdout``
-        - ``with_line_numbers`` - (default= ``False``) show test strings with line and column numbers
-
- Returns: a (success, results) tuple, where success indicates that all tests succeeded
- (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
- test's output
-
- Example::
-
- number_expr = pyparsing_common.number.copy()
-
- result = number_expr.run_tests('''
- # unsigned integer
- 100
- # negative integer
- -100
- # float with scientific notation
- 6.02e23
- # integer with scientific notation
- 1e-12
- ''')
- print("Success" if result[0] else "Failed!")
-
- result = number_expr.run_tests('''
- # stray character
- 100Z
- # missing leading digit before '.'
- -.100
- # too many '.'
- 3.14.159
- ''', failure_tests=True)
- print("Success" if result[0] else "Failed!")
-
- prints::
-
- # unsigned integer
- 100
- [100]
-
- # negative integer
- -100
- [-100]
-
- # float with scientific notation
- 6.02e23
- [6.02e+23]
-
- # integer with scientific notation
- 1e-12
- [1e-12]
-
- Success
-
- # stray character
- 100Z
- ^
- FAIL: Expected end of text (at char 3), (line:1, col:4)
-
- # missing leading digit before '.'
- -.100
- ^
- FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
- # too many '.'
- 3.14.159
- ^
- FAIL: Expected end of text (at char 4), (line:1, col:5)
-
- Success
-
- Each test string must be on a single line. If you want to test a string that spans multiple
- lines, create a test like this::
-
- expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
-
- (Note that this is a raw string literal, you must include the leading ``'r'``.)
- """
- from .testing import pyparsing_test
-
- parseAll = parseAll and parse_all
- fullDump = fullDump and full_dump
- printResults = printResults and print_results
- failureTests = failureTests or failure_tests
- postParse = postParse or post_parse
- if isinstance(tests, str_type):
- line_strip = type(tests).strip
- tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
- if isinstance(comment, str_type):
- comment = Literal(comment)
- if file is None:
- file = sys.stdout
- print_ = file.write
-
- result: Union[ParseResults, Exception]
- allResults = []
- comments = []
- success = True
- NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
- BOM = "\ufeff"
- for t in tests:
-            if (comment is not None and comment.matches(t, False)) or (comments and not t):
- comments.append(
- pyparsing_test.with_line_numbers(t) if with_line_numbers else t
- )
- continue
- if not t:
- continue
- out = [
- "\n" + "\n".join(comments) if comments else "",
- pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
- ]
- comments = []
- try:
- # convert newline marks to actual newlines, and strip leading BOM if present
- t = NL.transform_string(t.lstrip(BOM))
- result = self.parse_string(t, parse_all=parseAll)
- except ParseBaseException as pe:
- fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
- out.append(pe.explain())
- out.append("FAIL: " + str(pe))
- if ParserElement.verbose_stacktrace:
- out.extend(traceback.format_tb(pe.__traceback__))
- success = success and failureTests
- result = pe
- except Exception as exc:
- out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
- if ParserElement.verbose_stacktrace:
- out.extend(traceback.format_tb(exc.__traceback__))
- success = success and failureTests
- result = exc
- else:
- success = success and not failureTests
- if postParse is not None:
- try:
- pp_value = postParse(t, result)
- if pp_value is not None:
- if isinstance(pp_value, ParseResults):
- out.append(pp_value.dump())
- else:
- out.append(str(pp_value))
- else:
- out.append(result.dump())
- except Exception as e:
- out.append(result.dump(full=fullDump))
- out.append(
- "{} failed: {}: {}".format(
- postParse.__name__, type(e).__name__, e
- )
- )
- else:
- out.append(result.dump(full=fullDump))
- out.append("")
-
- if printResults:
- print_("\n".join(out))
-
- allResults.append((t, result))
-
- return success, allResults
-
- def create_diagram(
- self,
- output_html: Union[TextIO, Path, str],
- vertical: int = 3,
- show_results_names: bool = False,
- show_groups: bool = False,
- **kwargs,
- ) -> None:
- """
- Create a railroad diagram for the parser.
-
- Parameters:
- - output_html (str or file-like object) - output target for generated
- diagram HTML
- - vertical (int) - threshold for formatting multiple alternatives vertically
- instead of horizontally (default=3)
- - show_results_names - bool flag whether diagram should show annotations for
- defined results names
- - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box
- Additional diagram-formatting keyword arguments can also be included;
- see railroad.Diagram class.
- """
-
- try:
- from .diagram import to_railroad, railroad_to_html
- except ImportError as ie:
- raise Exception(
- "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
- ) from ie
-
- self.streamline()
-
- railroad = to_railroad(
- self,
- vertical=vertical,
- show_results_names=show_results_names,
- show_groups=show_groups,
- diagram_kwargs=kwargs,
- )
- if isinstance(output_html, (str, Path)):
- with open(output_html, "w", encoding="utf-8") as diag_file:
- diag_file.write(railroad_to_html(railroad))
- else:
- # we were passed a file-like object, just write to it
- output_html.write(railroad_to_html(railroad))
-
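-    # A short sketch (illustrative only; requires the optional diagrams extra,
-    # installed via ``pip install pyparsing[diagrams]``):
-    #
-    #   date_expr = Word(nums)("year") + "/" + Word(nums)("month") + "/" + Word(nums)("day")
-    #   date_expr.create_diagram("date_parser.html", show_results_names=True)
-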
- setDefaultWhitespaceChars = set_default_whitespace_chars
- inlineLiteralsUsing = inline_literals_using
- setResultsName = set_results_name
- setBreak = set_break
- setParseAction = set_parse_action
- addParseAction = add_parse_action
- addCondition = add_condition
- setFailAction = set_fail_action
- tryParse = try_parse
- canParseNext = can_parse_next
- resetCache = reset_cache
- enableLeftRecursion = enable_left_recursion
- enablePackrat = enable_packrat
- parseString = parse_string
- scanString = scan_string
- searchString = search_string
- transformString = transform_string
- setWhitespaceChars = set_whitespace_chars
- parseWithTabs = parse_with_tabs
- setDebugActions = set_debug_actions
- setDebug = set_debug
- defaultName = default_name
- setName = set_name
- parseFile = parse_file
- runTests = run_tests
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class _PendingSkip(ParserElement):
-    # internal placeholder class to hold a place where '...' is added to a parser element,
- # once another ParserElement is added, this placeholder will be replaced with a SkipTo
- def __init__(self, expr: ParserElement, must_skip: bool = False):
- super().__init__()
- self.anchor = expr
- self.must_skip = must_skip
-
- def _generateDefaultName(self):
- return str(self.anchor + Empty()).replace("Empty", "...")
-
- def __add__(self, other) -> "ParserElement":
- skipper = SkipTo(other).set_name("...")("_skipped*")
- if self.must_skip:
-
- def must_skip(t):
- if not t._skipped or t._skipped.as_list() == [""]:
- del t[0]
- t.pop("_skipped", None)
-
- def show_skip(t):
- if t._skipped.as_list()[-1:] == [""]:
- t.pop("_skipped")
- t["_skipped"] = "missing <" + repr(self.anchor) + ">"
-
- return (
- self.anchor + skipper().add_parse_action(must_skip)
- | skipper().add_parse_action(show_skip)
- ) + other
-
- return self.anchor + skipper + other
-
- def __repr__(self):
- return self.defaultName
-
- def parseImpl(self, *args):
- raise Exception(
- "use of `...` expression without following SkipTo target expression"
- )
-
-
-class Token(ParserElement):
- """Abstract :class:`ParserElement` subclass, for defining atomic
- matching patterns.
- """
-
- def __init__(self):
- super().__init__(savelist=False)
-
- def _generateDefaultName(self):
- return type(self).__name__
-
-
-class Empty(Token):
- """
- An empty token, will always match.
- """
-
- def __init__(self):
- super().__init__()
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class NoMatch(Token):
- """
- A token that will never match.
- """
-
- def __init__(self):
- super().__init__()
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.errmsg = "Unmatchable token"
-
- def parseImpl(self, instring, loc, doActions=True):
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
- """
- Token to exactly match a specified string.
-
- Example::
-
- Literal('blah').parse_string('blah') # -> ['blah']
- Literal('blah').parse_string('blahfooblah') # -> ['blah']
- Literal('blah').parse_string('bla') # -> Exception: Expected "blah"
-
- For case-insensitive matching, use :class:`CaselessLiteral`.
-
- For keyword matching (force word break before and after the matched string),
- use :class:`Keyword` or :class:`CaselessKeyword`.
- """
-
- def __init__(self, match_string: str = "", *, matchString: str = ""):
- super().__init__()
- match_string = matchString or match_string
- self.match = match_string
- self.matchLen = len(match_string)
- try:
- self.firstMatchChar = match_string[0]
- except IndexError:
- raise ValueError("null string passed to Literal; use Empty() instead")
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
-
- # Performance tuning: modify __class__ to select
- # a parseImpl optimized for single-character check
- if self.matchLen == 1 and type(self) is Literal:
- self.__class__ = _SingleCharLiteral
-
- def _generateDefaultName(self):
- return repr(self.match)
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] == self.firstMatchChar and instring.startswith(
- self.match, loc
- ):
- return loc + self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class _SingleCharLiteral(Literal):
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] == self.firstMatchChar:
- return loc + 1, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-ParserElement._literalStringClass = Literal
-
-
-class Keyword(Token):
- """
- Token to exactly match a specified string as a keyword, that is,
- it must be immediately followed by a non-keyword character. Compare
- with :class:`Literal`:
-
- - ``Literal("if")`` will match the leading ``'if'`` in
- ``'ifAndOnlyIf'``.
- - ``Keyword("if")`` will not; it will only match the leading
- ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
-
- Accepts two optional constructor arguments in addition to the
- keyword string:
-
- - ``identChars`` is a string of characters that would be valid
- identifier characters, defaulting to all alphanumerics + "_" and
- "$"
- - ``caseless`` allows case-insensitive matching, default is ``False``.
-
- Example::
-
- Keyword("start").parse_string("start") # -> ['start']
- Keyword("start").parse_string("starting") # -> Exception
-
- For case-insensitive matching, use :class:`CaselessKeyword`.
- """
-
- DEFAULT_KEYWORD_CHARS = alphanums + "_$"
-
- def __init__(
- self,
- match_string: str = "",
- ident_chars: typing.Optional[str] = None,
- caseless: bool = False,
- *,
- matchString: str = "",
- identChars: typing.Optional[str] = None,
- ):
- super().__init__()
- identChars = identChars or ident_chars
- if identChars is None:
- identChars = Keyword.DEFAULT_KEYWORD_CHARS
- match_string = matchString or match_string
- self.match = match_string
- self.matchLen = len(match_string)
- try:
- self.firstMatchChar = match_string[0]
- except IndexError:
- raise ValueError("null string passed to Keyword; use Empty() instead")
- self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
- self.mayReturnEmpty = False
- self.mayIndexError = False
- self.caseless = caseless
- if caseless:
- self.caselessmatch = match_string.upper()
- identChars = identChars.upper()
- self.identChars = set(identChars)
-
- def _generateDefaultName(self):
- return repr(self.match)
-
- def parseImpl(self, instring, loc, doActions=True):
- errmsg = self.errmsg
- errloc = loc
- if self.caseless:
- if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
- if loc == 0 or instring[loc - 1].upper() not in self.identChars:
- if (
- loc >= len(instring) - self.matchLen
- or instring[loc + self.matchLen].upper() not in self.identChars
- ):
- return loc + self.matchLen, self.match
- else:
- # followed by keyword char
- errmsg += ", was immediately followed by keyword character"
- errloc = loc + self.matchLen
- else:
- # preceded by keyword char
- errmsg += ", keyword was immediately preceded by keyword character"
- errloc = loc - 1
- # else no match just raise plain exception
-
- else:
- if (
- instring[loc] == self.firstMatchChar
- and self.matchLen == 1
- or instring.startswith(self.match, loc)
- ):
- if loc == 0 or instring[loc - 1] not in self.identChars:
- if (
- loc >= len(instring) - self.matchLen
- or instring[loc + self.matchLen] not in self.identChars
- ):
- return loc + self.matchLen, self.match
- else:
- # followed by keyword char
- errmsg += (
- ", keyword was immediately followed by keyword character"
- )
- errloc = loc + self.matchLen
- else:
- # preceded by keyword char
- errmsg += ", keyword was immediately preceded by keyword character"
- errloc = loc - 1
- # else no match just raise plain exception
-
- raise ParseException(instring, errloc, errmsg, self)
-
- @staticmethod
- def set_default_keyword_chars(chars) -> None:
- """
- Overrides the default characters used by :class:`Keyword` expressions.
- """
- Keyword.DEFAULT_KEYWORD_CHARS = chars
-
- setDefaultKeywordChars = set_default_keyword_chars
-
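-# Illustrative contrast between Literal and Keyword (a minimal sketch, not part
-# of the original source; behavior follows the docstrings above):
-#
-#     Literal("if").parse_string("ifAndOnlyIf")  # -> ['if'] (matches the prefix)
-#     Keyword("if").parse_string("if(y==2)")     # -> ['if'] ('(' is not an ident char)
-#     Keyword("if").parse_string("ifAndOnlyIf")  # raises ParseException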
-
-class CaselessLiteral(Literal):
- """
- Token to match a specified string, ignoring case of letters.
- Note: the matched results will always be in the case of the given
- match string, NOT the case of the input text.
-
- Example::
-
- CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
- # -> ['CMD', 'CMD', 'CMD']
-
- (Contrast with example for :class:`CaselessKeyword`.)
- """
-
- def __init__(self, match_string: str = "", *, matchString: str = ""):
- match_string = matchString or match_string
- super().__init__(match_string.upper())
- # Preserve the defining literal.
- self.returnString = match_string
- self.errmsg = "Expected " + self.name
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc : loc + self.matchLen].upper() == self.match:
- return loc + self.matchLen, self.returnString
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class CaselessKeyword(Keyword):
- """
- Caseless version of :class:`Keyword`.
-
- Example::
-
- CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
- # -> ['CMD', 'CMD']
-
- (Contrast with example for :class:`CaselessLiteral`.)
- """
-
- def __init__(
- self,
- match_string: str = "",
- ident_chars: typing.Optional[str] = None,
- *,
- matchString: str = "",
- identChars: typing.Optional[str] = None,
- ):
- identChars = identChars or ident_chars
- match_string = matchString or match_string
- super().__init__(match_string, identChars, caseless=True)
-
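-# Side-by-side sketch of the two caseless classes (illustrative, not part of the
-# original source; results follow the docstring examples above):
-#
-#     CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")  # -> ['CMD', 'CMD', 'CMD']
-#     CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")  # -> ['CMD', 'CMD']
-#
-# The keyword form stops before "Cmd10" because the trailing '1' is a keyword
-# character, so the word-boundary check fails there.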
-
-class CloseMatch(Token):
- """A variation on :class:`Literal` which matches "close" matches,
- that is, strings with at most 'n' mismatching characters.
- :class:`CloseMatch` takes parameters:
-
- - ``match_string`` - string to be matched
- - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
- - ``max_mismatches`` - (``default=1``) maximum number of
- mismatches allowed to count as a match
-
- The results from a successful parse will contain the matched text
- from the input string and the following named results:
-
- - ``mismatches`` - a list of the positions within the
- match_string where mismatches were found
- - ``original`` - the original match_string used to compare
- against the input string
-
- If ``mismatches`` is an empty list, then the match was an exact
- match.
-
- Example::
-
- patt = CloseMatch("ATCATCGAATGGA")
- patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
- patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
- # exact match
- patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
- # close match allowing up to 2 mismatches
- patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
- patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
- """
-
- def __init__(
- self,
- match_string: str,
-        max_mismatches: typing.Optional[int] = None,
-        *,
-        maxMismatches: int = 1,
-        caseless: bool = False,
- ):
- maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
- super().__init__()
- self.match_string = match_string
- self.maxMismatches = maxMismatches
- self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
- self.match_string, self.maxMismatches
- )
- self.caseless = caseless
- self.mayIndexError = False
- self.mayReturnEmpty = False
-
- def _generateDefaultName(self):
- return "{}:{!r}".format(type(self).__name__, self.match_string)
-
- def parseImpl(self, instring, loc, doActions=True):
- start = loc
- instrlen = len(instring)
- maxloc = start + len(self.match_string)
-
- if maxloc <= instrlen:
- match_string = self.match_string
- match_stringloc = 0
- mismatches = []
- maxMismatches = self.maxMismatches
-
- for match_stringloc, s_m in enumerate(
- zip(instring[loc:maxloc], match_string)
- ):
- src, mat = s_m
- if self.caseless:
- src, mat = src.lower(), mat.lower()
-
- if src != mat:
- mismatches.append(match_stringloc)
- if len(mismatches) > maxMismatches:
- break
- else:
- loc = start + match_stringloc + 1
- results = ParseResults([instring[start:loc]])
- results["original"] = match_string
- results["mismatches"] = mismatches
- return loc, results
-
- raise ParseException(instring, loc, self.errmsg, self)
-
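-# A small caseless sketch (not part of the original source): with caseless=True,
-# character comparisons are made on lowercased values.
-#
-#     patt = CloseMatch("ATCATCGAATGGA", caseless=True)
-#     patt.parse_string("atcatcgaatgga")  # -> matches, with 'mismatches' == []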
-
-class Word(Token):
- """Token for matching words composed of allowed character sets.
- Parameters:
- - ``init_chars`` - string of all characters that should be used to
- match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
- if ``body_chars`` is also specified, then this is the string of
- initial characters
- - ``body_chars`` - string of characters that
- can be used for matching after a matched initial character as
- given in ``init_chars``; if omitted, same as the initial characters
- (default=``None``)
- - ``min`` - minimum number of characters to match (default=1)
- - ``max`` - maximum number of characters to match (default=0)
- - ``exact`` - exact number of characters to match (default=0)
- - ``as_keyword`` - match as a keyword (default=``False``)
- - ``exclude_chars`` - characters that might be
- found in the input ``body_chars`` string but which should not be
-      accepted for matching; useful to define a word of all
- printables except for one or two characters, for instance
- (default=``None``)
-
- :class:`srange` is useful for defining custom character set strings
- for defining :class:`Word` expressions, using range notation from
- regular expression character sets.
-
- A common mistake is to use :class:`Word` to match a specific literal
- string, as in ``Word("Address")``. Remember that :class:`Word`
- uses the string argument to define *sets* of matchable characters.
- This expression would match "Add", "AAA", "dAred", or any other word
- made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
- exact literal string, use :class:`Literal` or :class:`Keyword`.
-
- pyparsing includes helper strings for building Words:
-
- - :class:`alphas`
- - :class:`nums`
- - :class:`alphanums`
- - :class:`hexnums`
- - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255
- - accented, tilded, umlauted, etc.)
- - :class:`punc8bit` (non-alphabetic characters in ASCII range
- 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- - :class:`printables` (any non-whitespace character)
-
- ``alphas``, ``nums``, and ``printables`` are also defined in several
-    Unicode sets - see :class:`pyparsing_unicode`.
-
- Example::
-
- # a word composed of digits
- integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
- # a word with a leading capital, and zero or more lowercase
- capital_word = Word(alphas.upper(), alphas.lower())
-
- # hostnames are alphanumeric, with leading alpha, and '-'
- hostname = Word(alphas, alphanums + '-')
-
- # roman numeral (not a strict parser, accepts invalid mix of characters)
- roman = Word("IVXLCDM")
-
- # any string of non-whitespace characters, except for ','
- csv_value = Word(printables, exclude_chars=",")
- """
-
- def __init__(
- self,
- init_chars: str = "",
- body_chars: typing.Optional[str] = None,
- min: int = 1,
- max: int = 0,
- exact: int = 0,
- as_keyword: bool = False,
- exclude_chars: typing.Optional[str] = None,
- *,
- initChars: typing.Optional[str] = None,
- bodyChars: typing.Optional[str] = None,
- asKeyword: bool = False,
- excludeChars: typing.Optional[str] = None,
- ):
- initChars = initChars or init_chars
- bodyChars = bodyChars or body_chars
- asKeyword = asKeyword or as_keyword
- excludeChars = excludeChars or exclude_chars
- super().__init__()
- if not initChars:
- raise ValueError(
- "invalid {}, initChars cannot be empty string".format(
- type(self).__name__
- )
- )
-
- initChars = set(initChars)
- self.initChars = initChars
- if excludeChars:
- excludeChars = set(excludeChars)
- initChars -= excludeChars
- if bodyChars:
- bodyChars = set(bodyChars) - excludeChars
- self.initCharsOrig = "".join(sorted(initChars))
-
- if bodyChars:
- self.bodyCharsOrig = "".join(sorted(bodyChars))
- self.bodyChars = set(bodyChars)
- else:
- self.bodyCharsOrig = "".join(sorted(initChars))
- self.bodyChars = set(initChars)
-
- self.maxSpecified = max > 0
-
- if min < 1:
- raise ValueError(
- "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
- )
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asKeyword = asKeyword
-
- # see if we can make a regex for this Word
- if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
- if self.bodyChars == self.initChars:
- if max == 0:
- repeat = "+"
- elif max == 1:
- repeat = ""
- else:
- repeat = "{{{},{}}}".format(
- self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
- )
- self.reString = "[{}]{}".format(
- _collapse_string_to_ranges(self.initChars),
- repeat,
- )
- elif len(self.initChars) == 1:
- if max == 0:
- repeat = "*"
- else:
- repeat = "{{0,{}}}".format(max - 1)
- self.reString = "{}[{}]{}".format(
- re.escape(self.initCharsOrig),
- _collapse_string_to_ranges(self.bodyChars),
- repeat,
- )
- else:
- if max == 0:
- repeat = "*"
- elif max == 2:
- repeat = ""
- else:
- repeat = "{{0,{}}}".format(max - 1)
- self.reString = "[{}][{}]{}".format(
- _collapse_string_to_ranges(self.initChars),
- _collapse_string_to_ranges(self.bodyChars),
- repeat,
- )
- if self.asKeyword:
- self.reString = r"\b" + self.reString + r"\b"
-
- try:
- self.re = re.compile(self.reString)
- except re.error:
- self.re = None
- else:
- self.re_match = self.re.match
- self.__class__ = _WordRegex
-
- def _generateDefaultName(self):
- def charsAsStr(s):
- max_repr_len = 16
- s = _collapse_string_to_ranges(s, re_escape=False)
- if len(s) > max_repr_len:
- return s[: max_repr_len - 3] + "..."
- else:
- return s
-
- if self.initChars != self.bodyChars:
- base = "W:({}, {})".format(
- charsAsStr(self.initChars), charsAsStr(self.bodyChars)
- )
- else:
- base = "W:({})".format(charsAsStr(self.initChars))
-
- # add length specification
- if self.minLen > 1 or self.maxLen != _MAX_INT:
- if self.minLen == self.maxLen:
- if self.minLen == 1:
- return base[2:]
- else:
- return base + "{{{}}}".format(self.minLen)
- elif self.maxLen == _MAX_INT:
- return base + "{{{},...}}".format(self.minLen)
- else:
- return base + "{{{},{}}}".format(self.minLen, self.maxLen)
- return base
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] not in self.initChars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- instrlen = len(instring)
- bodychars = self.bodyChars
- maxloc = start + self.maxLen
- maxloc = min(maxloc, instrlen)
- while loc < maxloc and instring[loc] in bodychars:
- loc += 1
-
- throwException = False
- if loc - start < self.minLen:
- throwException = True
- elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
- throwException = True
- elif self.asKeyword:
- if (
- start > 0
- and instring[start - 1] in bodychars
- or loc < instrlen
- and instring[loc] in bodychars
- ):
- throwException = True
-
- if throwException:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
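-# Length-constrained Words (a minimal sketch, not part of the original source):
-#
-#     Word(nums, exact=4).parse_string("20231231")  # -> ['2023'] (stops after 4 chars)
-#     Word(alphas, min=3).parse_string("ab cd")     # raises ParseException ('ab' too short)
-#     Word(alphas, max=2).parse_string("abcd")      # -> ['ab'] (regex-optimized path)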
-
-class _WordRegex(Word):
- def parseImpl(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- return loc, result.group()
-
-
-class Char(_WordRegex):
- """A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
- when defining a match of any single character in a string of
- characters.
- """
-
- def __init__(
- self,
- charset: str,
- as_keyword: bool = False,
- exclude_chars: typing.Optional[str] = None,
- *,
- asKeyword: bool = False,
- excludeChars: typing.Optional[str] = None,
- ):
- asKeyword = asKeyword or as_keyword
- excludeChars = excludeChars or exclude_chars
- super().__init__(
- charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
- )
- self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
- if asKeyword:
- self.reString = r"\b{}\b".format(self.reString)
- self.re = re.compile(self.reString)
- self.re_match = self.re.match
-
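-# Char usage sketch (not part of the original source): expr[n] repeats exactly n times.
-#
-#     hex_byte = Char(hexnums)[2]
-#     hex_byte.parse_string("7f")  # -> ['7', 'f']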
-
-class Regex(Token):
- r"""Token for matching strings that match a given regular
- expression. Defined with string specifying the regular expression in
-    a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
-    If the given regex contains named groups (defined using ``(?P<name>...)``),
- these will be preserved as named :class:`ParseResults`.
-
- If instead of the Python stdlib ``re`` module you wish to use a different RE module
- (such as the ``regex`` module), you can do so by building your ``Regex`` object with
- a compiled RE that was compiled using ``regex``.
-
- Example::
-
- realnum = Regex(r"[+-]?\d+\.\d*")
- # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-
- # named fields in a regex will be returned as named results
-        date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-
- # the Regex class will accept re's compiled using the regex module
- import regex
- parser = pp.Regex(regex.compile(r'[0-9]'))
- """
-
- def __init__(
- self,
- pattern: Any,
- flags: Union[re.RegexFlag, int] = 0,
- as_group_list: bool = False,
- as_match: bool = False,
- *,
- asGroupList: bool = False,
- asMatch: bool = False,
- ):
- """The parameters ``pattern`` and ``flags`` are passed
- to the ``re.compile()`` function as-is. See the Python
-        `re module <https://docs.python.org/3/library/re.html>`_ for an
- explanation of the acceptable patterns and flags.
- """
- super().__init__()
- asGroupList = asGroupList or as_group_list
- asMatch = asMatch or as_match
-
- if isinstance(pattern, str_type):
- if not pattern:
- raise ValueError("null string passed to Regex; use Empty() instead")
-
- self._re = None
- self.reString = self.pattern = pattern
- self.flags = flags
-
- elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
- self._re = pattern
- self.pattern = self.reString = pattern.pattern
- self.flags = flags
-
- else:
- raise TypeError(
- "Regex may only be constructed with a string or a compiled RE object"
- )
-
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asGroupList = asGroupList
- self.asMatch = asMatch
- if self.asGroupList:
- self.parseImpl = self.parseImplAsGroupList
- if self.asMatch:
- self.parseImpl = self.parseImplAsMatch
-
- @cached_property
- def re(self):
- if self._re:
- return self._re
- else:
- try:
- return re.compile(self.pattern, self.flags)
- except re.error:
- raise ValueError(
- "invalid pattern ({!r}) passed to Regex".format(self.pattern)
- )
-
- @cached_property
- def re_match(self):
- return self.re.match
-
- @cached_property
- def mayReturnEmpty(self):
- return self.re_match("") is not None
-
- def _generateDefaultName(self):
- return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
-
- def parseImpl(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = ParseResults(result.group())
- d = result.groupdict()
- if d:
- for k, v in d.items():
- ret[k] = v
- return loc, ret
-
- def parseImplAsGroupList(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.groups()
- return loc, ret
-
- def parseImplAsMatch(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result
- return loc, ret
-
- def sub(self, repl: str) -> ParserElement:
- r"""
- Return :class:`Regex` with an attached parse action to transform the parsed
-        result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
-
- Example::
-
-            make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
-            print(make_html.transform_string("h1:main title:"))
-            # prints "<h1>main title</h1>"
- """
- if self.asGroupList:
- raise TypeError("cannot use sub() with Regex(asGroupList=True)")
-
- if self.asMatch and callable(repl):
- raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
-
- if self.asMatch:
-
- def pa(tokens):
- return tokens[0].expand(repl)
-
- else:
-
- def pa(tokens):
- return self.re.sub(repl, tokens[0])
-
- return self.add_parse_action(pa)
-
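-# Named-group results sketch (not part of the original source):
-#
-#     date = Regex(r"(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)")
-#     result = date.parse_string("2023-07-04")
-#     result["year"], result["month"], result["day"]  # -> ('2023', '07', '04')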
-
-class QuotedString(Token):
- r"""
- Token for matching strings that are delimited by quoting characters.
-
- Defined with the following parameters:
-
- - ``quote_char`` - string of one or more characters defining the
- quote delimiting string
-    - ``esc_char`` - character to escape quotes, typically backslash
-      (default= ``None``)
-    - ``esc_quote`` - special quote sequence to escape an embedded quote
-      string (such as SQL's ``""`` to escape an embedded ``"``)
- (default= ``None``)
- - ``multiline`` - boolean indicating whether quotes can span
- multiple lines (default= ``False``)
- - ``unquote_results`` - boolean indicating whether the matched text
- should be unquoted (default= ``True``)
- - ``end_quote_char`` - string of one or more characters defining the
- end of the quote delimited string (default= ``None`` => same as
- quote_char)
- - ``convert_whitespace_escapes`` - convert escaped whitespace
- (``'\t'``, ``'\n'``, etc.) to actual whitespace
- (default= ``True``)
-
- Example::
-
- qs = QuotedString('"')
- print(qs.search_string('lsjdf "This is the quote" sldjf'))
- complex_qs = QuotedString('{{', end_quote_char='}}')
- print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
- sql_qs = QuotedString('"', esc_quote='""')
- print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
-
- prints::
-
- [['This is the quote']]
- [['This is the "quote"']]
- [['This is the quote with "embedded" quotes']]
- """
- ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
-
- def __init__(
- self,
- quote_char: str = "",
- esc_char: typing.Optional[str] = None,
- esc_quote: typing.Optional[str] = None,
- multiline: bool = False,
- unquote_results: bool = True,
- end_quote_char: typing.Optional[str] = None,
- convert_whitespace_escapes: bool = True,
- *,
- quoteChar: str = "",
- escChar: typing.Optional[str] = None,
- escQuote: typing.Optional[str] = None,
- unquoteResults: bool = True,
- endQuoteChar: typing.Optional[str] = None,
- convertWhitespaceEscapes: bool = True,
- ):
- super().__init__()
- escChar = escChar or esc_char
- escQuote = escQuote or esc_quote
- unquoteResults = unquoteResults and unquote_results
- endQuoteChar = endQuoteChar or end_quote_char
- convertWhitespaceEscapes = (
- convertWhitespaceEscapes and convert_whitespace_escapes
- )
- quote_char = quoteChar or quote_char
-
-        # remove whitespace from quote chars - won't work anyway
- quote_char = quote_char.strip()
- if not quote_char:
- raise ValueError("quote_char cannot be the empty string")
-
- if endQuoteChar is None:
- endQuoteChar = quote_char
- else:
- endQuoteChar = endQuoteChar.strip()
- if not endQuoteChar:
- raise ValueError("endQuoteChar cannot be the empty string")
-
- self.quoteChar = quote_char
- self.quoteCharLen = len(quote_char)
- self.firstQuoteChar = quote_char[0]
- self.endQuoteChar = endQuoteChar
- self.endQuoteCharLen = len(endQuoteChar)
- self.escChar = escChar
- self.escQuote = escQuote
- self.unquoteResults = unquoteResults
- self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
- sep = ""
- inner_pattern = ""
-
- if escQuote:
- inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
- sep = "|"
-
- if escChar:
- inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
- sep = "|"
- self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
-
- if len(self.endQuoteChar) > 1:
- inner_pattern += (
- "{}(?:".format(sep)
- + "|".join(
- "(?:{}(?!{}))".format(
- re.escape(self.endQuoteChar[:i]),
- re.escape(self.endQuoteChar[i:]),
- )
- for i in range(len(self.endQuoteChar) - 1, 0, -1)
- )
- + ")"
- )
- sep = "|"
-
- if multiline:
- self.flags = re.MULTILINE | re.DOTALL
- inner_pattern += r"{}(?:[^{}{}])".format(
- sep,
- _escape_regex_range_chars(self.endQuoteChar[0]),
- (_escape_regex_range_chars(escChar) if escChar is not None else ""),
- )
- else:
- self.flags = 0
- inner_pattern += r"{}(?:[^{}\n\r{}])".format(
- sep,
- _escape_regex_range_chars(self.endQuoteChar[0]),
- (_escape_regex_range_chars(escChar) if escChar is not None else ""),
- )
-
- self.pattern = "".join(
- [
- re.escape(self.quoteChar),
- "(?:",
- inner_pattern,
- ")*",
- re.escape(self.endQuoteChar),
- ]
- )
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- self.re_match = self.re.match
- except re.error:
- raise ValueError(
- "invalid pattern {!r} passed to Regex".format(self.pattern)
- )
-
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def _generateDefaultName(self):
- if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
- return "string enclosed in {!r}".format(self.quoteChar)
-
- return "quoted string, starting with {} ending with {}".format(
- self.quoteChar, self.endQuoteChar
- )
-
- def parseImpl(self, instring, loc, doActions=True):
- result = (
- instring[loc] == self.firstQuoteChar
- and self.re_match(instring, loc)
- or None
- )
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.group()
-
- if self.unquoteResults:
-
- # strip off quotes
- ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
-
- if isinstance(ret, str_type):
- # replace escaped whitespace
- if "\\" in ret and self.convertWhitespaceEscapes:
- for wslit, wschar in self.ws_map:
- ret = ret.replace(wslit, wschar)
-
- # replace escaped characters
- if self.escChar:
- ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
- # replace escaped quotes
- if self.escQuote:
- ret = ret.replace(self.escQuote, self.endQuoteChar)
-
- return loc, ret
-
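-# Escape-handling sketch (not part of the original source): with esc_char set,
-# escaped quotes are unescaped when unquote_results is True (the default).
-#
-#     qs = QuotedString('"', esc_char="\\")
-#     qs.parse_string(r'"a \"quoted\" word"')  # -> ['a "quoted" word']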
-
-class CharsNotIn(Token):
- """Token for matching words composed of characters *not* in a given
- set (will include whitespace in matched characters if not listed in
- the provided exclusion set - see example). Defined with string
- containing all disallowed characters, and an optional minimum,
- maximum, and/or exact length. The default value for ``min`` is
- 1 (a minimum value < 1 is not valid); the default values for
- ``max`` and ``exact`` are 0, meaning no maximum or exact
- length restriction.
-
- Example::
-
- # define a comma-separated-value as anything that is not a ','
- csv_value = CharsNotIn(',')
- print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
-
- prints::
-
- ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
- """
-
- def __init__(
- self,
- not_chars: str = "",
- min: int = 1,
- max: int = 0,
- exact: int = 0,
- *,
- notChars: str = "",
- ):
- super().__init__()
- self.skipWhitespace = False
- self.notChars = not_chars or notChars
- self.notCharsSet = set(self.notChars)
-
- if min < 1:
- raise ValueError(
- "cannot specify a minimum length < 1; use "
- "Opt(CharsNotIn()) if zero-length char group is permitted"
- )
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = self.minLen == 0
- self.mayIndexError = False
-
- def _generateDefaultName(self):
- not_chars_str = _collapse_string_to_ranges(self.notChars)
- if len(not_chars_str) > 16:
- return "!W:({}...)".format(self.notChars[: 16 - 3])
- else:
- return "!W:({})".format(self.notChars)
-
- def parseImpl(self, instring, loc, doActions=True):
- notchars = self.notCharsSet
- if instring[loc] in notchars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- maxlen = min(start + self.maxLen, len(instring))
- while loc < maxlen and instring[loc] not in notchars:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
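-# Whitespace is kept unless excluded (a minimal sketch, not part of the original
-# source):
-#
-#     CharsNotIn("/").parse_string("usr local/bin")  # -> ['usr local'] (space retained)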
-
-class White(Token):
- """Special matching class for matching whitespace. Normally,
- whitespace is ignored by pyparsing grammars. This class is included
- when some whitespace structures are significant. Define with
- a string containing the whitespace characters to be matched; default
- is ``" \\t\\r\\n"``. Also takes optional ``min``,
- ``max``, and ``exact`` arguments, as defined for the
- :class:`Word` class.
- """
-
-    whiteStrs = {
-        " ": "<SP>",
-        "\t": "<TAB>",
-        "\n": "<LF>",
-        "\r": "<CR>",
-        "\f": "<FF>",
-        "\u00A0": "<NBSP>",
-        "\u1680": "<OGHAM_SPACE_MARK>",
-        "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
-        "\u2000": "<EN_QUAD>",
-        "\u2001": "<EM_QUAD>",
-        "\u2002": "<EN_SPACE>",
-        "\u2003": "<EM_SPACE>",
-        "\u2004": "<THREE-PER-EM_SPACE>",
-        "\u2005": "<FOUR-PER-EM_SPACE>",
-        "\u2006": "<SIX-PER-EM_SPACE>",
-        "\u2007": "<FIGURE_SPACE>",
-        "\u2008": "<PUNCTUATION_SPACE>",
-        "\u2009": "<THIN_SPACE>",
-        "\u200A": "<HAIR_SPACE>",
-        "\u200B": "<ZERO_WIDTH_SPACE>",
-        "\u202F": "<NNBSP>",
-        "\u205F": "<MMSP>",
-        "\u3000": "<IDEOGRAPHIC_SPACE>",
-    }
-
- def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
- super().__init__()
- self.matchWhite = ws
- self.set_whitespace_chars(
- "".join(c for c in self.whiteStrs if c not in self.matchWhite),
- copy_defaults=True,
- )
- # self.leave_whitespace()
- self.mayReturnEmpty = True
- self.errmsg = "Expected " + self.name
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- def _generateDefaultName(self):
- return "".join(White.whiteStrs[c] for c in self.matchWhite)
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] not in self.matchWhite:
- raise ParseException(instring, loc, self.errmsg, self)
- start = loc
- loc += 1
- maxloc = start + self.maxLen
- maxloc = min(maxloc, len(instring))
- while loc < maxloc and instring[loc] in self.matchWhite:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
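-# Significant-whitespace sketch (not part of the original source): White("\t")
-# matches tabs explicitly instead of skipping them.
-#
-#     tab_sep = Word(alphas) + White("\t").suppress() + Word(alphas)
-#     tab_sep.parse_string("key\tvalue")  # -> ['key', 'value']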
-
-class PositionToken(Token):
- def __init__(self):
- super().__init__()
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class GoToColumn(PositionToken):
- """Token to advance to a specific column of input text; useful for
- tabular report scraping.
- """
-
- def __init__(self, colno: int):
- super().__init__()
- self.col = colno
-
- def preParse(self, instring, loc):
- if col(loc, instring) != self.col:
- instrlen = len(instring)
- if self.ignoreExprs:
- loc = self._skipIgnorables(instring, loc)
- while (
- loc < instrlen
- and instring[loc].isspace()
- and col(loc, instring) != self.col
- ):
- loc += 1
- return loc
-
- def parseImpl(self, instring, loc, doActions=True):
- thiscol = col(loc, instring)
- if thiscol > self.col:
- raise ParseException(instring, loc, "Text not in expected column", self)
- newloc = loc + self.col - thiscol
- ret = instring[loc:newloc]
- return newloc, ret
-
-
-class LineStart(PositionToken):
- r"""Matches if current position is at the beginning of a line within
- the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
- print(t)
-
- prints::
-
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
-
- def __init__(self):
- super().__init__()
- self.leave_whitespace()
- self.orig_whiteChars = set() | self.whiteChars
- self.whiteChars.discard("\n")
- self.skipper = Empty().set_whitespace_chars(self.whiteChars)
- self.errmsg = "Expected start of line"
-
- def preParse(self, instring, loc):
- if loc == 0:
- return loc
- else:
- ret = self.skipper.preParse(instring, loc)
- if "\n" in self.orig_whiteChars:
- while instring[ret : ret + 1] == "\n":
- ret = self.skipper.preParse(instring, ret + 1)
- return ret
-
- def parseImpl(self, instring, loc, doActions=True):
- if col(loc, instring) == 1:
- return loc, []
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class LineEnd(PositionToken):
- """Matches if current position is at the end of a line within the
- parse string
- """
-
- def __init__(self):
- super().__init__()
- self.whiteChars.discard("\n")
- self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
- self.errmsg = "Expected end of line"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc < len(instring):
- if instring[loc] == "\n":
- return loc + 1, "\n"
- else:
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc + 1, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
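-# Line-oriented sketch (not part of the original source): anchor an expression
-# to a whole line with LineStart/LineEnd.
-#
-#     line = LineStart() + Word(alphas) + LineEnd().suppress()
-#     line.search_string("alpha\nbeta\n")  # -> [['alpha'], ['beta']]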
-
-class StringStart(PositionToken):
- """Matches if current position is at the beginning of the parse
- string
- """
-
- def __init__(self):
- super().__init__()
- self.errmsg = "Expected start of text"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc != 0:
- # see if entire string up to here is just whitespace and ignoreables
- if loc != self.preParse(instring, 0):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class StringEnd(PositionToken):
- """
- Matches if current position is at the end of the parse string
- """
-
- def __init__(self):
- super().__init__()
- self.errmsg = "Expected end of text"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc < len(instring):
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc + 1, []
- elif loc > len(instring):
- return loc, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class WordStart(PositionToken):
- """Matches if the current position is at the beginning of a
- :class:`Word`, and is not preceded by any character in a given
- set of ``word_chars`` (default= ``printables``). To emulate the
- ``\b`` behavior of regular expressions, use
- ``WordStart(alphanums)``. ``WordStart`` will also match at
- the beginning of the string being parsed, or at the beginning of
- a line.
- """
-
- def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
- wordChars = word_chars if wordChars == printables else wordChars
- super().__init__()
- self.wordChars = set(wordChars)
- self.errmsg = "Not at the start of a word"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc != 0:
- if (
- instring[loc - 1] in self.wordChars
- or instring[loc] not in self.wordChars
- ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class WordEnd(PositionToken):
- """Matches if the current position is at the end of a :class:`Word`,
- and is not followed by any character in a given set of ``word_chars``
- (default= ``printables``). To emulate the ``\b`` behavior of
- regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
- will also match at the end of the string being parsed, or at the end
- of a line.
- """
-
- def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
- wordChars = word_chars if wordChars == printables else wordChars
- super().__init__()
- self.wordChars = set(wordChars)
- self.skipWhitespace = False
- self.errmsg = "Not at the end of a word"
-
- def parseImpl(self, instring, loc, doActions=True):
- instrlen = len(instring)
- if instrlen > 0 and loc < instrlen:
- if (
- instring[loc] in self.wordChars
- or instring[loc - 1] not in self.wordChars
- ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
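-# Emulating regex \b word boundaries (a minimal sketch, not part of the original
-# source):
-#
-#     whole_ball = WordStart(alphanums) + Literal("ball") + WordEnd(alphanums)
-#     whole_ball.search_string("football ball balls")  # -> [['ball']]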
-
-class ParseExpression(ParserElement):
- """Abstract subclass of ParserElement, for combining and
- post-processing parsed tokens.
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
- super().__init__(savelist)
- self.exprs: List[ParserElement]
- if isinstance(exprs, _generatorType):
- exprs = list(exprs)
-
- if isinstance(exprs, str_type):
- self.exprs = [self._literalStringClass(exprs)]
- elif isinstance(exprs, ParserElement):
- self.exprs = [exprs]
- elif isinstance(exprs, Iterable):
- exprs = list(exprs)
- # if sequence of strings provided, wrap with Literal
- if any(isinstance(expr, str_type) for expr in exprs):
- exprs = (
- self._literalStringClass(e) if isinstance(e, str_type) else e
- for e in exprs
- )
- self.exprs = list(exprs)
- else:
- try:
- self.exprs = list(exprs)
- except TypeError:
- self.exprs = [exprs]
- self.callPreparse = False
-
- def recurse(self) -> Sequence[ParserElement]:
- return self.exprs[:]
-
- def append(self, other) -> ParserElement:
- self.exprs.append(other)
- self._defaultName = None
- return self
-
- def leave_whitespace(self, recursive: bool = True) -> ParserElement:
- """
- Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
- all contained expressions.
- """
- super().leave_whitespace(recursive)
-
- if recursive:
- self.exprs = [e.copy() for e in self.exprs]
- for e in self.exprs:
- e.leave_whitespace(recursive)
- return self
-
- def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
- """
- Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
- all contained expressions.
- """
- super().ignore_whitespace(recursive)
- if recursive:
- self.exprs = [e.copy() for e in self.exprs]
- for e in self.exprs:
- e.ignore_whitespace(recursive)
- return self
-
- def ignore(self, other) -> ParserElement:
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- super().ignore(other)
- for e in self.exprs:
- e.ignore(self.ignoreExprs[-1])
- else:
- super().ignore(other)
- for e in self.exprs:
- e.ignore(self.ignoreExprs[-1])
- return self
-
- def _generateDefaultName(self):
- return "{}:({})".format(self.__class__.__name__, str(self.exprs))
-
- def streamline(self) -> ParserElement:
- if self.streamlined:
- return self
-
- super().streamline()
-
- for e in self.exprs:
- e.streamline()
-
- # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
- # but only if there are no parse actions or resultsNames on the nested And's
- # (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
- if len(self.exprs) == 2:
- other = self.exprs[0]
- if (
- isinstance(other, self.__class__)
- and not other.parseAction
- and other.resultsName is None
- and not other.debug
- ):
- self.exprs = other.exprs[:] + [self.exprs[1]]
- self._defaultName = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- other = self.exprs[-1]
- if (
- isinstance(other, self.__class__)
- and not other.parseAction
- and other.resultsName is None
- and not other.debug
- ):
- self.exprs = self.exprs[:-1] + other.exprs[:]
- self._defaultName = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- self.errmsg = "Expected " + str(self)
-
- return self
-
- def validate(self, validateTrace=None) -> None:
- tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
- for e in self.exprs:
- e.validate(tmp)
- self._checkRecursion([])
-
- def copy(self) -> ParserElement:
- ret = super().copy()
- ret.exprs = [e.copy() for e in self.exprs]
- return ret
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_ungrouped_named_tokens_in_collection
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in self.suppress_warnings_
- ):
- for e in self.exprs:
- if (
- isinstance(e, ParserElement)
- and e.resultsName
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in e.suppress_warnings_
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "collides with {!r} on contained expression".format(
- "warn_ungrouped_named_tokens_in_collection",
- name,
- type(self).__name__,
- e.resultsName,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class And(ParseExpression):
- """
- Requires all given :class:`ParseExpression` s to be found in the given order.
- Expressions may be separated by whitespace.
- May be constructed using the ``'+'`` operator.
- May also be constructed using the ``'-'`` operator, which will
- suppress backtracking.
-
- Example::
-
- integer = Word(nums)
- name_expr = Word(alphas)[1, ...]
-
- expr = And([integer("id"), name_expr("name"), integer("age")])
- # more easily written as:
- expr = integer("id") + name_expr("name") + integer("age")
- """
-
- class _ErrorStop(Empty):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.leave_whitespace()
-
- def _generateDefaultName(self):
- return "-"
-
- def __init__(
- self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True
- ):
- exprs: List[ParserElement] = list(exprs_arg)
- if exprs and Ellipsis in exprs:
- tmp = []
- for i, expr in enumerate(exprs):
- if expr is Ellipsis:
- if i < len(exprs) - 1:
- skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
- tmp.append(SkipTo(skipto_arg)("_skipped*"))
- else:
- raise Exception(
- "cannot construct And with sequence ending in ..."
- )
- else:
- tmp.append(expr)
- exprs[:] = tmp
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- if not isinstance(self.exprs[0], White):
- self.set_whitespace_chars(
- self.exprs[0].whiteChars,
- copy_defaults=self.exprs[0].copyDefaultWhiteChars,
- )
- self.skipWhitespace = self.exprs[0].skipWhitespace
- else:
- self.skipWhitespace = False
- else:
- self.mayReturnEmpty = True
- self.callPreparse = True
-
- def streamline(self) -> ParserElement:
- # collapse any _PendingSkip's
- if self.exprs:
- if any(
- isinstance(e, ParseExpression)
- and e.exprs
- and isinstance(e.exprs[-1], _PendingSkip)
- for e in self.exprs[:-1]
- ):
- for i, e in enumerate(self.exprs[:-1]):
- if e is None:
- continue
- if (
- isinstance(e, ParseExpression)
- and e.exprs
- and isinstance(e.exprs[-1], _PendingSkip)
- ):
- e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
- self.exprs[i + 1] = None
- self.exprs = [e for e in self.exprs if e is not None]
-
- super().streamline()
-
- # link any IndentedBlocks to the prior expression
- for prev, cur in zip(self.exprs, self.exprs[1:]):
- # traverse cur or any first embedded expr of cur looking for an IndentedBlock
- # (but watch out for recursive grammar)
- seen = set()
- while cur:
- if id(cur) in seen:
- break
- seen.add(id(cur))
- if isinstance(cur, IndentedBlock):
- prev.add_parse_action(
- lambda s, l, t, cur_=cur: setattr(
- cur_, "parent_anchor", col(l, s)
- )
- )
- break
- subs = cur.recurse()
- cur = next(iter(subs), None)
-
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- # pass False as callPreParse arg to _parse for first element, since we already
- # pre-parsed the string as part of our And pre-parsing
- loc, resultlist = self.exprs[0]._parse(
- instring, loc, doActions, callPreParse=False
- )
- errorStop = False
- for e in self.exprs[1:]:
- # if isinstance(e, And._ErrorStop):
- if type(e) is And._ErrorStop:
- errorStop = True
- continue
- if errorStop:
- try:
- loc, exprtokens = e._parse(instring, loc, doActions)
- except ParseSyntaxException:
- raise
- except ParseBaseException as pe:
- pe.__traceback__ = None
- raise ParseSyntaxException._from_exception(pe)
- except IndexError:
- raise ParseSyntaxException(
- instring, len(instring), self.errmsg, self
- )
- else:
- loc, exprtokens = e._parse(instring, loc, doActions)
- if exprtokens or exprtokens.haskeys():
- resultlist += exprtokens
- return loc, resultlist
-
- def __iadd__(self, other):
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- return self.append(other) # And([self, other])
-
- def _checkRecursion(self, parseElementList):
- subRecCheckList = parseElementList[:] + [self]
- for e in self.exprs:
- e._checkRecursion(subRecCheckList)
- if not e.mayReturnEmpty:
- break
-
- def _generateDefaultName(self):
- inner = " ".join(str(e) for e in self.exprs)
- # strip off redundant inner {}'s
- while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
- inner = inner[1:-1]
- return "{" + inner + "}"
-
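-# The '-' operator inserts an _ErrorStop, disabling backtracking past that point
-# (a minimal sketch, not part of the original source):
-#
-#     stmt = Keyword("set") - Word(alphas)("name") + "=" + Word(nums)("value")
-#     stmt.parse_string("set x = 42")  # -> ['set', 'x', '=', '42']
-#     stmt.parse_string("set = 42")    # raises ParseSyntaxException, not ParseException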
-
-class Or(ParseExpression):
- """Requires that at least one :class:`ParseExpression` is found. If
- two expressions match, the expression that matches the longest
- string will be used. May be constructed using the ``'^'``
- operator.
-
- Example::
-
- # construct Or using '^' operator
-
- number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
- print(number.search_string("123 3.1416 789"))
-
- prints::
-
- [['123'], ['3.1416'], ['789']]
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def streamline(self) -> ParserElement:
- super().streamline()
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.saveAsList = any(e.saveAsList for e in self.exprs)
- self.skipWhitespace = all(
- e.skipWhitespace and not isinstance(e, White) for e in self.exprs
- )
- else:
- self.saveAsList = False
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- maxExcLoc = -1
- maxException = None
- matches = []
- fatals = []
- if all(e.callPreparse for e in self.exprs):
- loc = self.preParse(instring, loc)
- for e in self.exprs:
- try:
- loc2 = e.try_parse(instring, loc, raise_fatal=True)
- except ParseFatalException as pfe:
- pfe.__traceback__ = None
- pfe.parserElement = e
- fatals.append(pfe)
- maxException = None
- maxExcLoc = -1
- except ParseException as err:
- if not fatals:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(
- instring, len(instring), e.errmsg, self
- )
- maxExcLoc = len(instring)
- else:
- # save match among all matches, to retry longest to shortest
- matches.append((loc2, e))
-
- if matches:
- # re-evaluate all matches in descending order of length of match, in case attached actions
- # might change whether or how much they match of the input.
- matches.sort(key=itemgetter(0), reverse=True)
-
- if not doActions:
- # no further conditions or parse actions to change the selection of
- # alternative, so the first match will be the best match
- best_expr = matches[0][1]
- return best_expr._parse(instring, loc, doActions)
-
- longest = -1, None
- for loc1, expr1 in matches:
- if loc1 <= longest[0]:
- # already have a longer match than this one will deliver, we are done
- return longest
-
- try:
- loc2, toks = expr1._parse(instring, loc, doActions)
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- else:
- if loc2 >= loc1:
- return loc2, toks
- # didn't match as much as before
- elif loc2 > longest[0]:
- longest = loc2, toks
-
- if longest != (-1, None):
- return longest
-
- if fatals:
- if len(fatals) > 1:
- fatals.sort(key=lambda e: -e.loc)
- if fatals[0].loc == fatals[1].loc:
- fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
- max_fatal = fatals[0]
- raise max_fatal
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(
- instring, loc, "no defined alternatives to match", self
- )
-
- def __ixor__(self, other):
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- return self.append(other) # Or([self, other])
-
- def _generateDefaultName(self):
- return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_multiple_tokens_in_named_alternation
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in self.suppress_warnings_
- ):
- if any(
- isinstance(e, And)
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in e.suppress_warnings_
- for e in self.exprs
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "will return a list of all parsed tokens in an And alternative, "
- "in prior versions only the first token was returned; enclose "
- "contained argument in Group".format(
- "warn_multiple_tokens_in_named_alternation",
- name,
- type(self).__name__,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
-
-class MatchFirst(ParseExpression):
- """Requires that at least one :class:`ParseExpression` is found. If
- more than one expression matches, the first one listed is the one that will
- match. May be constructed using the ``'|'`` operator.
-
- Example::
-
- # construct MatchFirst using '|' operator
-
- # watch the order of expressions to match
- number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
- print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
- # put more selective expression first
- number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
- print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def streamline(self) -> ParserElement:
- if self.streamlined:
- return self
-
- super().streamline()
- if self.exprs:
- self.saveAsList = any(e.saveAsList for e in self.exprs)
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = all(
- e.skipWhitespace and not isinstance(e, White) for e in self.exprs
- )
- else:
- self.saveAsList = False
- self.mayReturnEmpty = True
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- maxExcLoc = -1
- maxException = None
-
- for e in self.exprs:
- try:
- return e._parse(
- instring,
- loc,
- doActions,
- )
- except ParseFatalException as pfe:
- pfe.__traceback__ = None
- pfe.parserElement = e
- raise
- except ParseException as err:
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(
- instring, len(instring), e.errmsg, self
- )
- maxExcLoc = len(instring)
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(
- instring, loc, "no defined alternatives to match", self
- )
-
- def __ior__(self, other):
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- return self.append(other) # MatchFirst([self, other])
-
- def _generateDefaultName(self):
- return "{" + " | ".join(str(e) for e in self.exprs) + "}"
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_multiple_tokens_in_named_alternation
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in self.suppress_warnings_
- ):
- if any(
- isinstance(e, And)
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in e.suppress_warnings_
- for e in self.exprs
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "will return a list of all parsed tokens in an And alternative, "
- "in prior versions only the first token was returned; enclose "
- "contained argument in Group".format(
- "warn_multiple_tokens_in_named_alternation",
- name,
- type(self).__name__,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
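-# Or vs. MatchFirst in one line (a minimal sketch, not part of the original source):
-#
-#     (Word(nums) ^ Word(hexnums)).parse_string("123abc")  # -> ['123abc'] (longest wins)
-#     (Word(nums) | Word(hexnums)).parse_string("123abc")  # -> ['123'] (first listed wins)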
-
-class Each(ParseExpression):
- """Requires all given :class:`ParseExpression` s to be found, but in
- any order. Expressions may be separated by whitespace.
-
- May be constructed using the ``'&'`` operator.
-
- Example::
-
- color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
- shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
- integer = Word(nums)
- shape_attr = "shape:" + shape_type("shape")
- posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
- color_attr = "color:" + color("color")
- size_attr = "size:" + integer("size")
-
- # use Each (using operator '&') to accept attributes in any order
- # (shape and posn are required, color and size are optional)
- shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
-
- shape_spec.run_tests('''
- shape: SQUARE color: BLACK posn: 100, 120
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- color:GREEN size:20 shape:TRIANGLE posn:20,40
- '''
- )
-
- prints::
-
- shape: SQUARE color: BLACK posn: 100, 120
- ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- - color: BLACK
- - posn: ['100', ',', '120']
- - x: 100
- - y: 120
- - shape: SQUARE
-
-
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- - color: BLUE
- - posn: ['50', ',', '80']
- - x: 50
- - y: 80
- - shape: CIRCLE
- - size: 50
-
-
- color: GREEN size: 20 shape: TRIANGLE posn: 20,40
- ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- - color: GREEN
- - posn: ['20', ',', '40']
- - x: 20
- - y: 40
- - shape: TRIANGLE
- - size: 20
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True):
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
- self.skipWhitespace = True
- self.initExprGroups = True
- self.saveAsList = True
-
- def streamline(self) -> ParserElement:
- super().streamline()
- if self.exprs:
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- if self.initExprGroups:
- self.opt1map = dict(
- (id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
- )
- opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
- opt2 = [
- e
- for e in self.exprs
- if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
- ]
- self.optionals = opt1 + opt2
- self.multioptionals = [
- e.expr.set_results_name(e.resultsName, list_all_matches=True)
- for e in self.exprs
- if isinstance(e, _MultipleMatch)
- ]
- self.multirequired = [
- e.expr.set_results_name(e.resultsName, list_all_matches=True)
- for e in self.exprs
- if isinstance(e, OneOrMore)
- ]
- self.required = [
- e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
- ]
- self.required += self.multirequired
- self.initExprGroups = False
-
- tmpLoc = loc
- tmpReqd = self.required[:]
- tmpOpt = self.optionals[:]
- multis = self.multioptionals[:]
- matchOrder = []
-
- keepMatching = True
- failed = []
- fatals = []
- while keepMatching:
- tmpExprs = tmpReqd + tmpOpt + multis
- failed.clear()
- fatals.clear()
- for e in tmpExprs:
- try:
- tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
- except ParseFatalException as pfe:
- pfe.__traceback__ = None
- pfe.parserElement = e
- fatals.append(pfe)
- failed.append(e)
- except ParseException:
- failed.append(e)
- else:
- matchOrder.append(self.opt1map.get(id(e), e))
- if e in tmpReqd:
- tmpReqd.remove(e)
- elif e in tmpOpt:
- tmpOpt.remove(e)
- if len(failed) == len(tmpExprs):
- keepMatching = False
-
- # look for any ParseFatalExceptions
- if fatals:
- if len(fatals) > 1:
- fatals.sort(key=lambda e: -e.loc)
- if fatals[0].loc == fatals[1].loc:
- fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
- max_fatal = fatals[0]
- raise max_fatal
-
- if tmpReqd:
- missing = ", ".join([str(e) for e in tmpReqd])
- raise ParseException(
- instring,
- loc,
- "Missing one or more required elements ({})".format(missing),
- )
-
- # add any unmatched Opts, in case they have default values defined
- matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
-
- total_results = ParseResults([])
- for e in matchOrder:
- loc, results = e._parse(instring, loc, doActions)
- total_results += results
-
- return loc, total_results
-
- def _generateDefaultName(self):
- return "{" + " & ".join(str(e) for e in self.exprs) + "}"
-
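-# Order-free matching sketch (not part of the original source): both orderings
-# yield the same named results.
-#
-#     pair = Word(nums)("num") & Word(alphas)("word")
-#     pair.parse_string("42 hello")  # -> num: '42', word: 'hello'
-#     pair.parse_string("hello 42")  # -> num: '42', word: 'hello'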
-
-class ParseElementEnhance(ParserElement):
- """Abstract subclass of :class:`ParserElement`, for combining and
- post-processing parsed tokens.
- """
-
- def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
- super().__init__(savelist)
- if isinstance(expr, str_type):
- if issubclass(self._literalStringClass, Token):
- expr = self._literalStringClass(expr)
- elif issubclass(type(self), self._literalStringClass):
- expr = Literal(expr)
- else:
- expr = self._literalStringClass(Literal(expr))
- self.expr = expr
- if expr is not None:
- self.mayIndexError = expr.mayIndexError
- self.mayReturnEmpty = expr.mayReturnEmpty
- self.set_whitespace_chars(
- expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
- )
- self.skipWhitespace = expr.skipWhitespace
- self.saveAsList = expr.saveAsList
- self.callPreparse = expr.callPreparse
- self.ignoreExprs.extend(expr.ignoreExprs)
-
- def recurse(self) -> Sequence[ParserElement]:
- return [self.expr] if self.expr is not None else []
-
- def parseImpl(self, instring, loc, doActions=True):
- if self.expr is not None:
- return self.expr._parse(instring, loc, doActions, callPreParse=False)
- else:
- raise ParseException(instring, loc, "No expression defined", self)
-
- def leave_whitespace(self, recursive: bool = True) -> ParserElement:
- super().leave_whitespace(recursive)
-
- if recursive:
- self.expr = self.expr.copy()
- if self.expr is not None:
- self.expr.leave_whitespace(recursive)
- return self
-
- def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
- super().ignore_whitespace(recursive)
-
- if recursive:
- self.expr = self.expr.copy()
- if self.expr is not None:
- self.expr.ignore_whitespace(recursive)
- return self
-
- def ignore(self, other) -> ParserElement:
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- super().ignore(other)
- if self.expr is not None:
- self.expr.ignore(self.ignoreExprs[-1])
- else:
- super().ignore(other)
- if self.expr is not None:
- self.expr.ignore(self.ignoreExprs[-1])
- return self
-
- def streamline(self) -> ParserElement:
- super().streamline()
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def _checkRecursion(self, parseElementList):
- if self in parseElementList:
- raise RecursiveGrammarException(parseElementList + [self])
- subRecCheckList = parseElementList[:] + [self]
- if self.expr is not None:
- self.expr._checkRecursion(subRecCheckList)
-
- def validate(self, validateTrace=None) -> None:
- if validateTrace is None:
- validateTrace = []
- tmp = validateTrace[:] + [self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self._checkRecursion([])
-
- def _generateDefaultName(self):
- return "{}:({})".format(self.__class__.__name__, str(self.expr))
-
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class IndentedBlock(ParseElementEnhance):
- """
- Expression to match one or more expressions at a given indentation level.
- Useful for parsing text where structure is implied by indentation (like Python source code).
- """
-
- class _Indent(Empty):
- def __init__(self, ref_col: int):
- super().__init__()
- self.errmsg = "expected indent at column {}".format(ref_col)
- self.add_condition(lambda s, l, t: col(l, s) == ref_col)
-
- class _IndentGreater(Empty):
- def __init__(self, ref_col: int):
- super().__init__()
- self.errmsg = "expected indent at column greater than {}".format(ref_col)
- self.add_condition(lambda s, l, t: col(l, s) > ref_col)
-
- def __init__(
- self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
- ):
- super().__init__(expr, savelist=True)
- # if recursive:
- # raise NotImplementedError("IndentedBlock with recursive is not implemented")
- self._recursive = recursive
- self._grouped = grouped
- self.parent_anchor = 1
-
- def parseImpl(self, instring, loc, doActions=True):
- # advance parse position to non-whitespace by using an Empty()
- # this should be the column to be used for all subsequent indented lines
- anchor_loc = Empty().preParse(instring, loc)
-
- # see if self.expr matches at the current location - if not it will raise an exception
- # and no further work is necessary
- self.expr.try_parse(instring, anchor_loc, doActions)
-
- indent_col = col(anchor_loc, instring)
- peer_detect_expr = self._Indent(indent_col)
-
- inner_expr = Empty() + peer_detect_expr + self.expr
- if self._recursive:
- sub_indent = self._IndentGreater(indent_col)
- nested_block = IndentedBlock(
- self.expr, recursive=self._recursive, grouped=self._grouped
- )
- nested_block.set_debug(self.debug)
- nested_block.parent_anchor = indent_col
- inner_expr += Opt(sub_indent + nested_block)
-
- inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
- block = OneOrMore(inner_expr)
-
- trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
-
- if self._grouped:
- wrapper = Group
- else:
- wrapper = lambda expr: expr
- return (wrapper(block) + Optional(trailing_undent)).parseImpl(
- instring, anchor_loc, doActions
- )
-
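Since the `IndentedBlock` docstring carries no usage example, here is a minimal sketch (assuming pyparsing 3.x; the grammar is illustrative only):

```python
import pyparsing as pp

stmt = pp.Word(pp.alphas)
suite = pp.IndentedBlock(stmt)
func_def = pp.Keyword("def") + stmt + pp.Suppress(":") + suite

sample = """\
def foo:
    alpha
    beta
"""
# the indented lines are gathered into one group:
# ['def', 'foo', ['alpha', 'beta']]
print(func_def.parse_string(sample))
```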
-
-class AtStringStart(ParseElementEnhance):
- """Matches if expression matches at the beginning of the parse
- string::
-
- AtStringStart(Word(nums)).parse_string("123")
- # prints ["123"]
-
- AtStringStart(Word(nums)).parse_string(" 123")
- # raises ParseException
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- self.callPreparse = False
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc != 0:
- raise ParseException(instring, loc, "not found at string start")
- return super().parseImpl(instring, loc, doActions)
-
-
-class AtLineStart(ParseElementEnhance):
- r"""Matches if an expression matches at the beginning of a line within
- the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (AtLineStart('AAA') + restOfLine).search_string(test):
- print(t)
-
- prints::
-
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- self.callPreparse = False
-
- def parseImpl(self, instring, loc, doActions=True):
- if col(loc, instring) != 1:
- raise ParseException(instring, loc, "not found at line start")
- return super().parseImpl(instring, loc, doActions)
-
-
-class FollowedBy(ParseElementEnhance):
- """Lookahead matching of the given parse expression.
- ``FollowedBy`` does *not* advance the parsing position within
- the input string, it only verifies that the specified parse
- expression matches at the current position. ``FollowedBy``
- always returns a null token list. If any results names are defined
- in the lookahead expression, those *will* be returned for access by
- name.
-
- Example::
-
- # use FollowedBy to match a label only if it is followed by a ':'
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
- attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
-
- prints::
-
- [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- self.mayReturnEmpty = True
-
- def parseImpl(self, instring, loc, doActions=True):
- # by using self._expr.parse and deleting the contents of the returned ParseResults list
- # we keep any named results that were defined in the FollowedBy expression
- _, ret = self.expr._parse(instring, loc, doActions=doActions)
- del ret[:]
-
- return loc, ret
-
-
-class PrecededBy(ParseElementEnhance):
- """Lookbehind matching of the given parse expression.
- ``PrecededBy`` does not advance the parsing position within the
- input string, it only verifies that the specified parse expression
- matches prior to the current position. ``PrecededBy`` always
- returns a null token list, but if a results name is defined on the
- given expression, it is returned.
-
- Parameters:
-
- - expr - expression that must match prior to the current parse
- location
-    - retreat - (default= ``None``) - (int) maximum number of characters
-      to look behind prior to the current parse location

-
- If the lookbehind expression is a string, :class:`Literal`,
- :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
- with a specified exact or maximum length, then the retreat
- parameter is not required. Otherwise, retreat must be specified to
- give a maximum number of characters to look back from
- the current parse position for a lookbehind match.
-
- Example::
-
- # VB-style variable names with type prefixes
- int_var = PrecededBy("#") + pyparsing_common.identifier
- str_var = PrecededBy("$") + pyparsing_common.identifier
-
- """
-
- def __init__(
- self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
- ):
- super().__init__(expr)
- self.expr = self.expr().leave_whitespace()
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.exact = False
- if isinstance(expr, str_type):
- retreat = len(expr)
- self.exact = True
- elif isinstance(expr, (Literal, Keyword)):
- retreat = expr.matchLen
- self.exact = True
- elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
- retreat = expr.maxLen
- self.exact = True
- elif isinstance(expr, PositionToken):
- retreat = 0
- self.exact = True
- self.retreat = retreat
- self.errmsg = "not preceded by " + str(expr)
- self.skipWhitespace = False
- self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
-
- def parseImpl(self, instring, loc=0, doActions=True):
- if self.exact:
- if loc < self.retreat:
- raise ParseException(instring, loc, self.errmsg)
- start = loc - self.retreat
- _, ret = self.expr._parse(instring, start)
- else:
- # retreat specified a maximum lookbehind window, iterate
- test_expr = self.expr + StringEnd()
- instring_slice = instring[max(0, loc - self.retreat) : loc]
- last_expr = ParseException(instring, loc, self.errmsg)
- for offset in range(1, min(loc, self.retreat + 1) + 1):
- try:
- # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
- _, ret = test_expr._parse(
- instring_slice, len(instring_slice) - offset
- )
- except ParseBaseException as pbe:
- last_expr = pbe
- else:
- break
- else:
- raise last_expr
- return loc, ret
-
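A small runnable sketch of the lookbehind behavior (assuming pyparsing 3.x; the identifiers below are made up):

```python
import pyparsing as pp

ident = pp.Word(pp.alphas, pp.alphanums + "_")
int_var = pp.PrecededBy("#") + ident
str_var = pp.PrecededBy("$") + ident

print(int_var.search_string("#count $name"))  # -> [['count']]
print(str_var.search_string("#count $name"))  # -> [['name']]
```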
-
-class Located(ParseElementEnhance):
- """
- Decorates a returned token with its starting and ending
- locations in the input string.
-
- This helper adds the following results names:
-
- - ``locn_start`` - location where matched expression begins
- - ``locn_end`` - location where matched expression ends
- - ``value`` - the actual parsed results
-
-    Be careful if the input text contains ``<TAB>`` characters, you
- may want to call :class:`ParserElement.parse_with_tabs`
-
- Example::
-
- wd = Word(alphas)
- for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
-
- prints::
-
- [0, ['ljsdf'], 5]
- [8, ['lksdjjf'], 15]
- [18, ['lkkjj'], 23]
-
- """
-
- def parseImpl(self, instring, loc, doActions=True):
- start = loc
- loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
- ret_tokens = ParseResults([start, tokens, loc])
- ret_tokens["locn_start"] = start
- ret_tokens["value"] = tokens
- ret_tokens["locn_end"] = loc
- if self.resultsName:
- # must return as a list, so that the name will be attached to the complete group
- return loc, [ret_tokens]
- else:
- return loc, ret_tokens
-
-
-class NotAny(ParseElementEnhance):
- """
- Lookahead to disallow matching with the given parse expression.
- ``NotAny`` does *not* advance the parsing position within the
- input string, it only verifies that the specified parse expression
- does *not* match at the current position. Also, ``NotAny`` does
- *not* skip over leading whitespace. ``NotAny`` always returns
- a null token list. May be constructed using the ``'~'`` operator.
-
- Example::
-
- AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
-
- # take care not to mistake keywords for identifiers
- ident = ~(AND | OR | NOT) + Word(alphas)
- boolean_term = Opt(NOT) + ident
-
- # very crude boolean expression - to support parenthesis groups and
- # operation hierarchy, use infix_notation
- boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]
-
- # integers that are followed by "." are actually floats
- integer = Word(nums) + ~Char(".")
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- # do NOT use self.leave_whitespace(), don't want to propagate to exprs
- # self.leave_whitespace()
- self.skipWhitespace = False
-
- self.mayReturnEmpty = True
- self.errmsg = "Found unwanted token, " + str(self.expr)
-
- def parseImpl(self, instring, loc, doActions=True):
- if self.expr.can_parse_next(instring, loc):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
- def _generateDefaultName(self):
- return "~{" + str(self.expr) + "}"
-
-
-class _MultipleMatch(ParseElementEnhance):
- def __init__(
- self,
- expr: ParserElement,
- stop_on: typing.Optional[Union[ParserElement, str]] = None,
- *,
- stopOn: typing.Optional[Union[ParserElement, str]] = None,
- ):
- super().__init__(expr)
- stopOn = stopOn or stop_on
- self.saveAsList = True
- ender = stopOn
- if isinstance(ender, str_type):
- ender = self._literalStringClass(ender)
- self.stopOn(ender)
-
- def stopOn(self, ender) -> ParserElement:
- if isinstance(ender, str_type):
- ender = self._literalStringClass(ender)
- self.not_ender = ~ender if ender is not None else None
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- self_expr_parse = self.expr._parse
- self_skip_ignorables = self._skipIgnorables
- check_ender = self.not_ender is not None
- if check_ender:
- try_not_ender = self.not_ender.tryParse
-
- # must be at least one (but first see if we are the stopOn sentinel;
- # if so, fail)
- if check_ender:
- try_not_ender(instring, loc)
- loc, tokens = self_expr_parse(instring, loc, doActions)
- try:
- hasIgnoreExprs = not not self.ignoreExprs
- while 1:
- if check_ender:
- try_not_ender(instring, loc)
- if hasIgnoreExprs:
- preloc = self_skip_ignorables(instring, loc)
- else:
- preloc = loc
- loc, tmptokens = self_expr_parse(instring, preloc, doActions)
- if tmptokens or tmptokens.haskeys():
- tokens += tmptokens
- except (ParseException, IndexError):
- pass
-
- return loc, tokens
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_ungrouped_named_tokens_in_collection
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in self.suppress_warnings_
- ):
- for e in [self.expr] + self.expr.recurse():
- if (
- isinstance(e, ParserElement)
- and e.resultsName
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in e.suppress_warnings_
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "collides with {!r} on contained expression".format(
- "warn_ungrouped_named_tokens_in_collection",
- name,
- type(self).__name__,
- e.resultsName,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
-
-class OneOrMore(_MultipleMatch):
- """
- Repetition of one or more of the given expression.
-
- Parameters:
-    - ``expr`` - expression that must match one or more times
-    - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression)
-
- Example::
-
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
-
- text = "shape: SQUARE posn: upper left color: BLACK"
- attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
- # use stop_on attribute for OneOrMore to avoid reading label string as part of the data
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
- OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
- # could also be written as
- (attr_expr * (1,)).parse_string(text).pprint()
- """
-
- def _generateDefaultName(self):
- return "{" + str(self.expr) + "}..."
-
-
-class ZeroOrMore(_MultipleMatch):
- """
- Optional repetition of zero or more of the given expression.
-
- Parameters:
- - ``expr`` - expression that must match zero or more times
- - ``stop_on`` - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression) - (default= ``None``)
-
- Example: similar to :class:`OneOrMore`
- """
-
- def __init__(
- self,
- expr: ParserElement,
- stop_on: typing.Optional[Union[ParserElement, str]] = None,
- *,
- stopOn: typing.Optional[Union[ParserElement, str]] = None,
- ):
- super().__init__(expr, stopOn=stopOn or stop_on)
- self.mayReturnEmpty = True
-
- def parseImpl(self, instring, loc, doActions=True):
- try:
- return super().parseImpl(instring, loc, doActions)
- except (ParseException, IndexError):
- return loc, ParseResults([], name=self.resultsName)
-
- def _generateDefaultName(self):
- return "[" + str(self.expr) + "]..."
-
-
-class _NullToken:
- def __bool__(self):
- return False
-
- def __str__(self):
- return ""
-
-
-class Opt(ParseElementEnhance):
- """
- Optional matching of the given expression.
-
- Parameters:
-    - ``expr`` - expression that must match zero or one time
- - ``default`` (optional) - value to be returned if the optional expression is not found.
-
- Example::
-
- # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
- zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
- zip.run_tests('''
- # traditional ZIP code
- 12345
-
- # ZIP+4 form
- 12101-0001
-
- # invalid ZIP
- 98765-
- ''')
-
- prints::
-
- # traditional ZIP code
- 12345
- ['12345']
-
- # ZIP+4 form
- 12101-0001
- ['12101-0001']
-
- # invalid ZIP
- 98765-
- ^
- FAIL: Expected end of text (at char 5), (line:1, col:6)
- """
-
- __optionalNotMatched = _NullToken()
-
- def __init__(
- self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
- ):
- super().__init__(expr, savelist=False)
- self.saveAsList = self.expr.saveAsList
- self.defaultValue = default
- self.mayReturnEmpty = True
-
- def parseImpl(self, instring, loc, doActions=True):
- self_expr = self.expr
- try:
- loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
- except (ParseException, IndexError):
- default_value = self.defaultValue
- if default_value is not self.__optionalNotMatched:
- if self_expr.resultsName:
- tokens = ParseResults([default_value])
- tokens[self_expr.resultsName] = default_value
- else:
- tokens = [default_value]
- else:
- tokens = []
- return loc, tokens
-
- def _generateDefaultName(self):
- inner = str(self.expr)
- # strip off redundant inner {}'s
- while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
- inner = inner[1:-1]
- return "[" + inner + "]"
-
-
-Optional = Opt
-
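The `default` parameter is not exercised by the docstring example above; a minimal sketch (assuming pyparsing 3.x):

```python
import pyparsing as pp

# Opt supplies its default only when the optional expression is absent
expr = pp.Word(pp.alphas) + pp.Opt(pp.Word(pp.nums), default="0")

print(expr.parse_string("abc 123"))  # -> ['abc', '123']
print(expr.parse_string("abc"))      # -> ['abc', '0']
```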
-
-class SkipTo(ParseElementEnhance):
- """
- Token for skipping over all undefined text until the matched
- expression is found.
-
- Parameters:
- - ``expr`` - target expression marking the end of the data to be skipped
- - ``include`` - if ``True``, the target expression is also parsed
- (the skipped text and target expression are returned as a 2-element
- list) (default= ``False``).
- - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
- comments) that might contain false matches to the target expression
- - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
- included in the skipped test; if found before the target expression is found,
- the :class:`SkipTo` is not a match
-
- Example::
-
- report = '''
- Outstanding Issues Report - 1 Jan 2000
-
- # | Severity | Description | Days Open
- -----+----------+-------------------------------------------+-----------
- 101 | Critical | Intermittent system crash | 6
- 94 | Cosmetic | Spelling error on Login ('log|n') | 14
- 79 | Minor | System slow when running too many reports | 47
- '''
- integer = Word(nums)
- SEP = Suppress('|')
- # use SkipTo to simply match everything up until the next SEP
- # - ignore quoted strings, so that a '|' character inside a quoted string does not match
- # - parse action will call token.strip() for each matched token, i.e., the description body
- string_data = SkipTo(SEP, ignore=quoted_string)
- string_data.set_parse_action(token_map(str.strip))
- ticket_expr = (integer("issue_num") + SEP
- + string_data("sev") + SEP
- + string_data("desc") + SEP
- + integer("days_open"))
-
- for tkt in ticket_expr.search_string(report):
-        print(tkt.dump())
-
- prints::
-
- ['101', 'Critical', 'Intermittent system crash', '6']
- - days_open: '6'
- - desc: 'Intermittent system crash'
- - issue_num: '101'
- - sev: 'Critical'
- ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- - days_open: '14'
- - desc: "Spelling error on Login ('log|n')"
- - issue_num: '94'
- - sev: 'Cosmetic'
- ['79', 'Minor', 'System slow when running too many reports', '47']
- - days_open: '47'
- - desc: 'System slow when running too many reports'
- - issue_num: '79'
- - sev: 'Minor'
- """
-
- def __init__(
- self,
- other: Union[ParserElement, str],
- include: bool = False,
-        ignore: typing.Optional[Union[ParserElement, str]] = None,
- fail_on: typing.Optional[Union[ParserElement, str]] = None,
- *,
-        failOn: typing.Optional[Union[ParserElement, str]] = None,
- ):
- super().__init__(other)
- failOn = failOn or fail_on
- self.ignoreExpr = ignore
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.includeMatch = include
- self.saveAsList = False
- if isinstance(failOn, str_type):
- self.failOn = self._literalStringClass(failOn)
- else:
- self.failOn = failOn
- self.errmsg = "No match found for " + str(self.expr)
-
- def parseImpl(self, instring, loc, doActions=True):
- startloc = loc
- instrlen = len(instring)
- self_expr_parse = self.expr._parse
- self_failOn_canParseNext = (
- self.failOn.canParseNext if self.failOn is not None else None
- )
- self_ignoreExpr_tryParse = (
- self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
- )
-
- tmploc = loc
- while tmploc <= instrlen:
- if self_failOn_canParseNext is not None:
- # break if failOn expression matches
- if self_failOn_canParseNext(instring, tmploc):
- break
-
- if self_ignoreExpr_tryParse is not None:
- # advance past ignore expressions
- while 1:
- try:
- tmploc = self_ignoreExpr_tryParse(instring, tmploc)
- except ParseBaseException:
- break
-
- try:
- self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
- except (ParseException, IndexError):
- # no match, advance loc in string
- tmploc += 1
- else:
- # matched skipto expr, done
- break
-
- else:
- # ran off the end of the input string without matching skipto expr, fail
- raise ParseException(instring, loc, self.errmsg, self)
-
- # build up return values
- loc = tmploc
- skiptext = instring[startloc:loc]
- skipresult = ParseResults(skiptext)
-
- if self.includeMatch:
- loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
- skipresult += mat
-
- return loc, skipresult
-
-
-class Forward(ParseElementEnhance):
- """
- Forward declaration of an expression to be defined later -
- used for recursive grammars, such as algebraic infix notation.
- When the expression is known, it is assigned to the ``Forward``
- variable using the ``'<<'`` operator.
-
- Note: take care when assigning to ``Forward`` not to overlook
- precedence of operators.
-
- Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
-
- fwd_expr << a | b | c
-
- will actually be evaluated as::
-
- (fwd_expr << a) | b | c
-
- thereby leaving b and c out as parseable alternatives. It is recommended that you
- explicitly group the values inserted into the ``Forward``::
-
- fwd_expr << (a | b | c)
-
- Converting to use the ``'<<='`` operator instead will avoid this problem.
-
- See :class:`ParseResults.pprint` for an example of a recursive
- parser created using ``Forward``.
- """
-
- def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None):
- self.caller_frame = traceback.extract_stack(limit=2)[0]
- super().__init__(other, savelist=False)
- self.lshift_line = None
-
- def __lshift__(self, other):
- if hasattr(self, "caller_frame"):
- del self.caller_frame
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- self.expr = other
- self.mayIndexError = self.expr.mayIndexError
- self.mayReturnEmpty = self.expr.mayReturnEmpty
- self.set_whitespace_chars(
- self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
- )
- self.skipWhitespace = self.expr.skipWhitespace
- self.saveAsList = self.expr.saveAsList
- self.ignoreExprs.extend(self.expr.ignoreExprs)
- self.lshift_line = traceback.extract_stack(limit=2)[-2]
- return self
-
- def __ilshift__(self, other):
- return self << other
-
- def __or__(self, other):
- caller_line = traceback.extract_stack(limit=2)[-2]
- if (
- __diag__.warn_on_match_first_with_lshift_operator
- and caller_line == self.lshift_line
- and Diagnostics.warn_on_match_first_with_lshift_operator
- not in self.suppress_warnings_
- ):
- warnings.warn(
- "using '<<' operator with '|' is probably an error, use '<<='",
- stacklevel=2,
- )
- ret = super().__or__(other)
- return ret
-
- def __del__(self):
- # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
- if (
- self.expr is None
- and __diag__.warn_on_assignment_to_Forward
- and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
- ):
- warnings.warn_explicit(
- "Forward defined here but no expression attached later using '<<=' or '<<'",
- UserWarning,
- filename=self.caller_frame.filename,
- lineno=self.caller_frame.lineno,
- )
-
- def parseImpl(self, instring, loc, doActions=True):
- if (
- self.expr is None
- and __diag__.warn_on_parse_using_empty_Forward
- and Diagnostics.warn_on_parse_using_empty_Forward
- not in self.suppress_warnings_
- ):
- # walk stack until parse_string, scan_string, search_string, or transform_string is found
- parse_fns = [
- "parse_string",
- "scan_string",
- "search_string",
- "transform_string",
- ]
- tb = traceback.extract_stack(limit=200)
- for i, frm in enumerate(reversed(tb), start=1):
- if frm.name in parse_fns:
- stacklevel = i + 1
- break
- else:
- stacklevel = 2
- warnings.warn(
- "Forward expression was never assigned a value, will not parse any input",
- stacklevel=stacklevel,
- )
- if not ParserElement._left_recursion_enabled:
- return super().parseImpl(instring, loc, doActions)
- # ## Bounded Recursion algorithm ##
- # Recursion only needs to be processed at ``Forward`` elements, since they are
- # the only ones that can actually refer to themselves. The general idea is
- # to handle recursion stepwise: We start at no recursion, then recurse once,
- # recurse twice, ..., until more recursion offers no benefit (we hit the bound).
- #
- # The "trick" here is that each ``Forward`` gets evaluated in two contexts
- # - to *match* a specific recursion level, and
- # - to *search* the bounded recursion level
- # and the two run concurrently. The *search* must *match* each recursion level
- # to find the best possible match. This is handled by a memo table, which
- # provides the previous match to the next level match attempt.
- #
- # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
- #
- # There is a complication since we not only *parse* but also *transform* via
- # actions: We do not want to run the actions too often while expanding. Thus,
- # we expand using `doActions=False` and only run `doActions=True` if the next
- # recursion level is acceptable.
- with ParserElement.recursion_lock:
- memo = ParserElement.recursion_memos
- try:
- # we are parsing at a specific recursion expansion - use it as-is
- prev_loc, prev_result = memo[loc, self, doActions]
- if isinstance(prev_result, Exception):
- raise prev_result
- return prev_loc, prev_result.copy()
- except KeyError:
- act_key = (loc, self, True)
- peek_key = (loc, self, False)
- # we are searching for the best recursion expansion - keep on improving
- # both `doActions` cases must be tracked separately here!
- prev_loc, prev_peek = memo[peek_key] = (
- loc - 1,
- ParseException(
- instring, loc, "Forward recursion without base case", self
- ),
- )
- if doActions:
- memo[act_key] = memo[peek_key]
- while True:
- try:
- new_loc, new_peek = super().parseImpl(instring, loc, False)
- except ParseException:
- # we failed before getting any match – do not hide the error
- if isinstance(prev_peek, Exception):
- raise
- new_loc, new_peek = prev_loc, prev_peek
- # the match did not get better: we are done
- if new_loc <= prev_loc:
- if doActions:
- # replace the match for doActions=False as well,
- # in case the action did backtrack
- prev_loc, prev_result = memo[peek_key] = memo[act_key]
- del memo[peek_key], memo[act_key]
- return prev_loc, prev_result.copy()
- del memo[peek_key]
- return prev_loc, prev_peek.copy()
- # the match did get better: see if we can improve further
- else:
- if doActions:
- try:
- memo[act_key] = super().parseImpl(instring, loc, True)
- except ParseException as e:
- memo[peek_key] = memo[act_key] = (new_loc, e)
- raise
- prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
-
- def leave_whitespace(self, recursive: bool = True) -> ParserElement:
- self.skipWhitespace = False
- return self
-
- def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
- self.skipWhitespace = True
- return self
-
- def streamline(self) -> ParserElement:
- if not self.streamlined:
- self.streamlined = True
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def validate(self, validateTrace=None) -> None:
- if validateTrace is None:
- validateTrace = []
-
- if self not in validateTrace:
- tmp = validateTrace[:] + [self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self._checkRecursion([])
-
- def _generateDefaultName(self):
- # Avoid infinite recursion by setting a temporary _defaultName
- self._defaultName = ": ..."
-
- # Use the string representation of main expression.
- retString = "..."
- try:
- if self.expr is not None:
- retString = str(self.expr)[:1000]
- else:
- retString = "None"
- finally:
- return self.__class__.__name__ + ": " + retString
-
- def copy(self) -> ParserElement:
- if self.expr is not None:
- return super().copy()
- else:
- ret = Forward()
- ret <<= self
- return ret
-
- def _setResultsName(self, name, list_all_matches=False):
- if (
- __diag__.warn_name_set_on_empty_Forward
- and Diagnostics.warn_name_set_on_empty_Forward
- not in self.suppress_warnings_
- ):
- if self.expr is None:
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "that has no contained expression".format(
- "warn_name_set_on_empty_Forward", name, type(self).__name__
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, list_all_matches)
-
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
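The bounded-recursion machinery in `Forward.parseImpl` above is what makes left-recursive grammars usable; a hedged sketch (assuming pyparsing 3.x, where `enable_left_recursion` is a global switch that is incompatible with packrat memoization):

```python
import pyparsing as pp

pp.ParserElement.enable_left_recursion()

expr = pp.Forward()
num = pp.Word(pp.nums)
# left-recursive alternative listed first: expr <- expr '+' num | num
expr <<= (expr + "+" + num) | num

print(expr.parse_string("1+2+3", parse_all=True))  # -> ['1', '+', '2', '+', '3']
```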
-
-class TokenConverter(ParseElementEnhance):
- """
-    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
- """
-
- def __init__(self, expr: Union[ParserElement, str], savelist=False):
- super().__init__(expr) # , savelist)
- self.saveAsList = False
-
-
-class Combine(TokenConverter):
- """Converter to concatenate all matching tokens to a single string.
- By default, the matching patterns must also be contiguous in the
- input string; this can be disabled by specifying
- ``'adjacent=False'`` in the constructor.
-
- Example::
-
- real = Word(nums) + '.' + Word(nums)
- print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
- # will also erroneously match the following
- print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
-
- real = Combine(Word(nums) + '.' + Word(nums))
- print(real.parse_string('3.1416')) # -> ['3.1416']
- # no match when there are internal spaces
- print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
- """
-
- def __init__(
- self,
- expr: ParserElement,
- join_string: str = "",
- adjacent: bool = True,
- *,
- joinString: typing.Optional[str] = None,
- ):
- super().__init__(expr)
- joinString = joinString if joinString is not None else join_string
- # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
- if adjacent:
- self.leave_whitespace()
- self.adjacent = adjacent
- self.skipWhitespace = True
- self.joinString = joinString
- self.callPreparse = True
-
- def ignore(self, other) -> ParserElement:
- if self.adjacent:
- ParserElement.ignore(self, other)
- else:
- super().ignore(other)
- return self
-
- def postParse(self, instring, loc, tokenlist):
- retToks = tokenlist.copy()
- del retToks[:]
- retToks += ParseResults(
- ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
- )
-
- if self.resultsName and retToks.haskeys():
- return [retToks]
- else:
- return retToks
-
-
-class Group(TokenConverter):
- """Converter to return the matched tokens as a list - useful for
- returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
-
- The optional ``aslist`` argument when set to True will return the
- parsed tokens as a Python list instead of a pyparsing ParseResults.
-
- Example::
-
- ident = Word(alphas)
- num = Word(nums)
- term = ident | num
- func = ident + Opt(delimited_list(term))
- print(func.parse_string("fn a, b, 100"))
- # -> ['fn', 'a', 'b', '100']
-
- func = ident + Group(Opt(delimited_list(term)))
- print(func.parse_string("fn a, b, 100"))
- # -> ['fn', ['a', 'b', '100']]
- """
-
- def __init__(self, expr: ParserElement, aslist: bool = False):
- super().__init__(expr)
- self.saveAsList = True
- self._asPythonList = aslist
-
- def postParse(self, instring, loc, tokenlist):
- if self._asPythonList:
- return ParseResults.List(
- tokenlist.asList()
- if isinstance(tokenlist, ParseResults)
- else list(tokenlist)
- )
- else:
- return [tokenlist]
-
-
-class Dict(TokenConverter):
- """Converter to return a repetitive expression as a list, but also
- as a dictionary. Each element can also be referenced using the first
- token in the expression as its key. Useful for tabular report
-    scraping when the first column can be used as an item key.
-
- The optional ``asdict`` argument when set to True will return the
- parsed tokens as a Python dict instead of a pyparsing ParseResults.
-
- Example::
-
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
- # print attributes as plain groups
- print(attr_expr[1, ...].parse_string(text).dump())
-
- # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
- result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
- print(result.dump())
-
- # access named fields as dict entries, or output as dict
- print(result['shape'])
- print(result.as_dict())
-
- prints::
-
- ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: 'light blue'
- - posn: 'upper left'
- - shape: 'SQUARE'
- - texture: 'burlap'
- SQUARE
- {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-
- See more examples at :class:`ParseResults` of accessing fields by results name.
- """
-
- def __init__(self, expr: ParserElement, asdict: bool = False):
- super().__init__(expr)
- self.saveAsList = True
- self._asPythonDict = asdict
-
- def postParse(self, instring, loc, tokenlist):
- for i, tok in enumerate(tokenlist):
- if len(tok) == 0:
- continue
-
- ikey = tok[0]
- if isinstance(ikey, int):
- ikey = str(ikey).strip()
-
- if len(tok) == 1:
- tokenlist[ikey] = _ParseResultsWithOffset("", i)
-
- elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
- tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
-
- else:
- try:
- dictvalue = tok.copy() # ParseResults(i)
- except Exception:
- exc = TypeError(
- "could not extract dict values from parsed results"
- " - Dict expression must contain Grouped expressions"
- )
- raise exc from None
-
- del dictvalue[0]
-
- if len(dictvalue) != 1 or (
- isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
- ):
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
- else:
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
-
- if self._asPythonDict:
- return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
- else:
- return [tokenlist] if self.resultsName else tokenlist
-
-
-class Suppress(TokenConverter):
- """Converter for ignoring the results of a parsed expression.
-
- Example::
-
- source = "a, b, c,d"
- wd = Word(alphas)
- wd_list1 = wd + (',' + wd)[...]
- print(wd_list1.parse_string(source))
-
- # often, delimiters that are useful during parsing are just in the
- # way afterward - use Suppress to keep them out of the parsed output
- wd_list2 = wd + (Suppress(',') + wd)[...]
- print(wd_list2.parse_string(source))
-
- # Skipped text (using '...') can be suppressed as well
- source = "lead in START relevant text END trailing text"
- start_marker = Keyword("START")
- end_marker = Keyword("END")
- find_body = Suppress(...) + start_marker + ... + end_marker
-        print(find_body.parse_string(source))
-
- prints::
-
- ['a', ',', 'b', ',', 'c', ',', 'd']
- ['a', 'b', 'c', 'd']
- ['START', 'relevant text ', 'END']
-
- (See also :class:`delimited_list`.)
- """
-
- def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
- if expr is ...:
- expr = _PendingSkip(NoMatch())
- super().__init__(expr)
-
- def __add__(self, other) -> "ParserElement":
- if isinstance(self.expr, _PendingSkip):
- return Suppress(SkipTo(other)) + other
- else:
- return super().__add__(other)
-
- def __sub__(self, other) -> "ParserElement":
- if isinstance(self.expr, _PendingSkip):
- return Suppress(SkipTo(other)) - other
- else:
- return super().__sub__(other)
-
- def postParse(self, instring, loc, tokenlist):
- return []
-
- def suppress(self) -> ParserElement:
- return self
-
-
-def trace_parse_action(f: ParseAction) -> ParseAction:
- """Decorator for debugging parse actions.
-
- When the parse action is called, this decorator will print
-    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
- When the parse action completes, the decorator will print
- ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
- Example::
-
- wd = Word(alphas)
-
- @trace_parse_action
- def remove_duplicate_chars(tokens):
- return ''.join(sorted(set(''.join(tokens))))
-
- wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
- print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
-
- prints::
-
- >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
-        <<leaving remove_duplicate_chars (ret: 'dfjkls')
-        ['dfjkls']
-    """
-    f = _trim_arity(f)
-
-    def z(*paArgs):
-        thisFunc = f.__name__
-        s, l, t = paArgs[-3:]
-        if len(paArgs) > 3:
- thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
- sys.stderr.write(
- ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
- )
- try:
- ret = f(*paArgs)
- except Exception as exc:
-            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
-            raise
-        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
-        return ret
-
-    return z
-
-
-def srange(s: str) -> str:
- r"""Helper to easily define string ranges for use in :class:`Word`
- construction. Borrows syntax from regexp ``'[]'`` string range
- definitions::
-
- srange("[0-9]") -> "0123456789"
- srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
- srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
- The input string must be enclosed in []'s, and the returned string
- is the expanded character set joined into a single string. The
- values enclosed in the []'s may be:
-
- - a single character
- - an escaped character with a leading backslash (such as ``\-``
- or ``\]``)
- - an escaped hex character with a leading ``'\x'``
- (``\x21``, which is a ``'!'`` character) (``\0x##``
- is also supported for backwards compatibility)
- - an escaped octal character with a leading ``'\0'``
- (``\041``, which is a ``'!'`` character)
- - a range of any of the above, separated by a dash (``'a-z'``,
- etc.)
- - any combination of the above (``'aeiouy'``,
- ``'a-zA-Z0-9_$'``, etc.)
- """
- _expanded = (
- lambda p: p
- if not isinstance(p, ParseResults)
- else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
- )
- try:
- return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
- except Exception:
- return ""
-
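A typical use of `srange` output as `Word` character sets (a sketch, assuming pyparsing 3.x):

```python
import pyparsing as pp

# C-style identifier: leading char from one range, body chars from another
c_ident = pp.Word(pp.srange("[a-zA-Z_]"), pp.srange("[a-zA-Z0-9_]"))
print(c_ident.parse_string("my_var2"))  # -> ['my_var2']
```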
-
-def token_map(func, *args) -> ParseAction:
- """Helper to define a parse action by mapping a function to all
- elements of a :class:`ParseResults` list. If any additional args are passed,
- they are forwarded to the given function as additional arguments
- after the token, as in
- ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
- which will convert the parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
-
- hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
- hex_ints.run_tests('''
- 00 11 22 aa FF 0a 0d 1a
- ''')
-
- upperword = Word(alphas).set_parse_action(token_map(str.upper))
- upperword[1, ...].run_tests('''
- my kingdom for a horse
- ''')
-
- wd = Word(alphas).set_parse_action(token_map(str.title))
- wd[1, ...].set_parse_action(' '.join).run_tests('''
- now is the winter of our discontent made glorious summer by this sun of york
- ''')
-
- prints::
-
- 00 11 22 aa FF 0a 0d 1a
- [0, 17, 34, 170, 255, 10, 13, 26]
-
- my kingdom for a horse
- ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
- now is the winter of our discontent made glorious summer by this sun of york
- ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
- """
-
- def pa(s, l, t):
- return [func(tokn, *args) for tokn in t]
-
- func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
- pa.__name__ = func_name
-
- return pa
-
-
-def autoname_elements() -> None:
- """
- Utility to simplify mass-naming of parser elements, for
- generating railroad diagram with named subdiagrams.
- """
- for name, var in sys._getframe().f_back.f_locals.items():
- if isinstance(var, ParserElement) and not var.customName:
- var.set_name(name)
-
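A short sketch of `autoname_elements` in action (assuming pyparsing 3.x; it inspects the caller's local variables, so it must be called in the scope where the elements are defined):

```python
import pyparsing as pp

integer = pp.Word(pp.nums)
identifier = pp.Word(pp.alphas)
assignment = identifier + "=" + integer

pp.autoname_elements()  # each element is named after its variable
print(integer)  # prints "integer" instead of the default "W:(0-9)"
```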
-
-dbl_quoted_string = Combine(
- Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
-).set_name("string enclosed in double quotes")
-
-sgl_quoted_string = Combine(
- Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
-).set_name("string enclosed in single quotes")
-
-quoted_string = Combine(
- Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
- | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
-).set_name("quotedString using single or double quotes")
-
-unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
-
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-# build list of built-in expressions, for future reference if a global default value
-# gets updated
-_builtin_exprs: List[ParserElement] = [
- v for v in vars().values() if isinstance(v, ParserElement)
-]
-
-# backward compatibility names
-tokenMap = token_map
-conditionAsParseAction = condition_as_parse_action
-nullDebugAction = null_debug_action
-sglQuotedString = sgl_quoted_string
-dblQuotedString = dbl_quoted_string
-quotedString = quoted_string
-unicodeString = unicode_string
-lineStart = line_start
-lineEnd = line_end
-stringStart = string_start
-stringEnd = string_end
-traceParseAction = trace_parse_action
diff --git a/spaces/Realcat/image-matching-webui/third_party/DKM/dkm/benchmarks/deprecated/hpatches_sequences_dense_benchmark.py b/spaces/Realcat/image-matching-webui/third_party/DKM/dkm/benchmarks/deprecated/hpatches_sequences_dense_benchmark.py
deleted file mode 100644
index 079622fdaf77c75aeadd675629f2512c45d04c2d..0000000000000000000000000000000000000000
--- a/spaces/Realcat/image-matching-webui/third_party/DKM/dkm/benchmarks/deprecated/hpatches_sequences_dense_benchmark.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from PIL import Image
-import numpy as np
-
-import os
-
-import torch
-from tqdm import tqdm
-
-from dkm.utils import *
-
-
-class HpatchesDenseBenchmark:
- """WARNING: HPATCHES grid goes from [0,n-1] instead of [0.5,n-0.5]"""
-
- def __init__(self, dataset_path) -> None:
- seqs_dir = "hpatches-sequences-release"
- self.seqs_path = os.path.join(dataset_path, seqs_dir)
- self.seq_names = sorted(os.listdir(self.seqs_path))
-
- def convert_coordinates(self, query_coords, query_to_support, wq, hq, wsup, hsup):
- # Get matches in output format on the grid [0, n] where the center of the top-left coordinate is [0.5, 0.5]
- offset = (
- 0.5 # Hpatches assumes that the center of the top-left pixel is at [0,0]
- )
- query_coords = (
- torch.stack(
- (
- wq * (query_coords[..., 0] + 1) / 2,
- hq * (query_coords[..., 1] + 1) / 2,
- ),
- axis=-1,
- )
- - offset
- )
- query_to_support = (
- torch.stack(
- (
- wsup * (query_to_support[..., 0] + 1) / 2,
- hsup * (query_to_support[..., 1] + 1) / 2,
- ),
- axis=-1,
- )
- - offset
- )
- return query_coords, query_to_support
-
- def inside_image(self, x, w, h):
- return torch.logical_and(
- x[:, 0] < (w - 1),
- torch.logical_and(x[:, 1] < (h - 1), (x > 0).prod(dim=-1)),
- )
-
- def benchmark(self, model):
- use_cuda = torch.cuda.is_available()
- device = torch.device("cuda:0" if use_cuda else "cpu")
- aepes = []
- pcks = []
- for seq_idx, seq_name in tqdm(
- enumerate(self.seq_names), total=len(self.seq_names)
- ):
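-            # "i_" sequences vary only illumination; this benchmark evaluates
-            # the viewpoint ("v_") sequences, so illumination sequences are skipped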
- if seq_name[0] == "i":
- continue
- im1_path = os.path.join(self.seqs_path, seq_name, "1.ppm")
- im1 = Image.open(im1_path)
- w1, h1 = im1.size
- for im_idx in range(2, 7):
- im2_path = os.path.join(self.seqs_path, seq_name, f"{im_idx}.ppm")
- im2 = Image.open(im2_path)
- w2, h2 = im2.size
- matches, certainty = model.match(im2, im1, do_pred_in_og_res=True)
- matches, certainty = matches.reshape(-1, 4), certainty.reshape(-1)
- inv_homography = torch.from_numpy(
- np.loadtxt(
- os.path.join(self.seqs_path, seq_name, "H_1_" + str(im_idx))
- )
- ).to(device)
- homography = torch.linalg.inv(inv_homography)
- pos_a, pos_b = self.convert_coordinates(
- matches[:, :2], matches[:, 2:], w2, h2, w1, h1
- )
- pos_a, pos_b = pos_a.double(), pos_b.double()
- pos_a_h = torch.cat(
- [pos_a, torch.ones([pos_a.shape[0], 1], device=device)], dim=1
- )
- pos_b_proj_h = (homography @ pos_a_h.t()).t()
- pos_b_proj = pos_b_proj_h[:, :2] / pos_b_proj_h[:, 2:]
- mask = self.inside_image(pos_b_proj, w1, h1)
- residual = pos_b - pos_b_proj
- dist = (residual**2).sum(dim=1).sqrt()[mask]
- aepes.append(torch.mean(dist).item())
- pck1 = (dist < 1.0).float().mean().item()
- pck3 = (dist < 3.0).float().mean().item()
- pck5 = (dist < 5.0).float().mean().item()
- pcks.append([pck1, pck3, pck5])
- m_pcks = np.mean(np.array(pcks), axis=0)
- return {
- "hp_pck1": m_pcks[0],
- "hp_pck3": m_pcks[1],
- "hp_pck5": m_pcks[2],
- }
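For intuition about `convert_coordinates` above: it maps DKM's normalized [-1, 1] coordinates to pixel coordinates, then shifts by 0.5 because HPatches places the top-left pixel center at (0, 0). A worked one-point sketch (the image size is illustrative):

```python
import torch

w, h = 640, 480
pt = torch.tensor([-1.0, 1.0])  # normalized (x, y)

# x = -1 maps to pixel -0.5 (left edge), y = +1 maps to h - 0.5 (bottom edge)
pixel = torch.stack((w * (pt[0] + 1) / 2, h * (pt[1] + 1) / 2)) - 0.5
print(pixel)  # tensor([ -0.5000, 479.5000])
```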
diff --git a/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/txt_processors/base_text_processor.py b/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/txt_processors/base_text_processor.py
deleted file mode 100644
index 69d51201dcb191c1c208ae1c87a34b5c97e6307f..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/GenerSpeech/data_gen/tts/txt_processors/base_text_processor.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from data_gen.tts.data_gen_utils import is_sil_phoneme
-
-REGISTERED_TEXT_PROCESSORS = {}
-
-def register_txt_processors(name):
- def _f(cls):
- REGISTERED_TEXT_PROCESSORS[name] = cls
- return cls
-
- return _f
-
-
-def get_txt_processor_cls(name):
- return REGISTERED_TEXT_PROCESSORS.get(name, None)
-
-
-class BaseTxtProcessor:
- @staticmethod
- def sp_phonemes():
- return ['|']
-
- @classmethod
- def process(cls, txt, preprocess_args):
- raise NotImplementedError
-
- @classmethod
- def postprocess(cls, txt_struct, preprocess_args):
- # remove sil phoneme in head and tail
- while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[0][0]):
- txt_struct = txt_struct[1:]
- while len(txt_struct) > 0 and is_sil_phoneme(txt_struct[-1][0]):
- txt_struct = txt_struct[:-1]
- if preprocess_args['with_phsep']:
- txt_struct = cls.add_bdr(txt_struct)
- if preprocess_args['add_eos_bos']:
-            txt_struct = [["<BOS>", ["<BOS>"]]] + txt_struct + [["<EOS>", ["<EOS>"]]]
- return txt_struct
-
- @classmethod
- def add_bdr(cls, txt_struct):
- txt_struct_ = []
- for i, ts in enumerate(txt_struct):
- txt_struct_.append(ts)
- if i != len(txt_struct) - 1 and \
- not is_sil_phoneme(txt_struct[i][0]) and not is_sil_phoneme(txt_struct[i + 1][0]):
- txt_struct_.append(['|', ['|']])
- return txt_struct_
\ No newline at end of file
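To show how the registry above is meant to be used, a hypothetical processor (the key 'en' and the class are invented for illustration):

```python
@register_txt_processors('en')
class EnTxtProcessor(BaseTxtProcessor):
    @classmethod
    def process(cls, txt, preprocess_args):
        # naive word/character split, just to exercise the interface
        txt_struct = [[w, list(w)] for w in txt.split()]
        return cls.postprocess(txt_struct, preprocess_args), txt

processor_cls = get_txt_processor_cls('en')  # -> EnTxtProcessor
```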
diff --git a/spaces/SIGGRAPH2022/Text2Human/Text2Human/utils/language_utils.py b/spaces/SIGGRAPH2022/Text2Human/Text2Human/utils/language_utils.py
deleted file mode 100644
index bb2ef69b3001f10b20069f40ec0141d28260482f..0000000000000000000000000000000000000000
--- a/spaces/SIGGRAPH2022/Text2Human/Text2Human/utils/language_utils.py
+++ /dev/null
@@ -1,315 +0,0 @@
-import numpy
-import torch
-from sentence_transformers import SentenceTransformer, util
-
-# predefined shape text
-upper_length_text = [
- 'sleeveless', 'without sleeves', 'sleeves have been cut off', 'tank top',
- 'tank shirt', 'muscle shirt', 'short-sleeve', 'short sleeves',
- 'with short sleeves', 'medium-sleeve', 'medium sleeves',
- 'with medium sleeves', 'sleeves reach elbow', 'long-sleeve',
- 'long sleeves', 'with long sleeves'
-]
-upper_length_attr = {
- 'sleeveless': 0,
- 'without sleeves': 0,
- 'sleeves have been cut off': 0,
- 'tank top': 0,
- 'tank shirt': 0,
- 'muscle shirt': 0,
- 'short-sleeve': 1,
- 'with short sleeves': 1,
- 'short sleeves': 1,
- 'medium-sleeve': 2,
- 'with medium sleeves': 2,
- 'medium sleeves': 2,
- 'sleeves reach elbow': 2,
- 'long-sleeve': 3,
- 'long sleeves': 3,
- 'with long sleeves': 3
-}
-lower_length_text = [
- 'three-point', 'medium', 'short', 'covering knee', 'cropped',
- 'three-quarter', 'long', 'slack', 'of long length'
-]
-lower_length_attr = {
- 'three-point': 0,
- 'medium': 1,
- 'covering knee': 1,
- 'short': 1,
- 'cropped': 2,
- 'three-quarter': 2,
- 'long': 3,
- 'slack': 3,
- 'of long length': 3
-}
-socks_length_text = [
- 'socks', 'stocking', 'pantyhose', 'leggings', 'sheer hosiery'
-]
-socks_length_attr = {
- 'socks': 0,
- 'stocking': 1,
- 'pantyhose': 1,
- 'leggings': 1,
- 'sheer hosiery': 1
-}
-hat_text = ['hat', 'cap', 'chapeau']
-eyeglasses_text = ['sunglasses']
-belt_text = ['belt', 'with a dress tied around the waist']
-outer_shape_text = [
- 'with outer clothing open', 'with outer clothing unzipped',
- 'covering inner clothes', 'with outer clothing zipped'
-]
-outer_shape_attr = {
- 'with outer clothing open': 0,
- 'with outer clothing unzipped': 0,
- 'covering inner clothes': 1,
- 'with outer clothing zipped': 1
-}
-
-upper_types = [
- 'T-shirt', 'shirt', 'sweater', 'hoodie', 'tops', 'blouse', 'Basic Tee'
-]
-outer_types = [
- 'jacket', 'outer clothing', 'coat', 'overcoat', 'blazer', 'outerwear',
- 'duffle', 'cardigan'
-]
-skirt_types = ['skirt']
-dress_types = ['dress']
-pant_types = ['jeans', 'pants', 'trousers']
-rompers_types = ['rompers', 'bodysuit', 'jumpsuit']
-
-attr_names_list = [
- 'gender', 'hair length', '0 upper clothing length',
- '1 lower clothing length', '2 socks', '3 hat', '4 eyeglasses', '5 belt',
- '6 opening of outer clothing', '7 upper clothes', '8 outer clothing',
- '9 skirt', '10 dress', '11 pants', '12 rompers'
-]
-
-
-def generate_shape_attributes(user_shape_texts):
- model = SentenceTransformer('all-MiniLM-L6-v2')
- parsed_texts = user_shape_texts.split(',')
-
- text_num = len(parsed_texts)
-
- human_attr = [0, 0]
- attr = [1, 3, 0, 0, 0, 3, 1, 1, 0, 0, 0, 0, 0]
-
- changed = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
- for text_id, text in enumerate(parsed_texts):
- user_embeddings = model.encode(text)
- if ('man' in text) and (text_id == 0):
- human_attr[0] = 0
- human_attr[1] = 0
-
- if ('woman' in text or 'lady' in text) and (text_id == 0):
- human_attr[0] = 1
- human_attr[1] = 2
-
- if (not changed[0]) and (text_id == 1):
- # upper length
- predefined_embeddings = model.encode(upper_length_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- attr[0] = upper_length_attr[upper_length_text[arg_idx]]
- changed[0] = 1
-
- if (not changed[1]) and ((text_num == 2 and text_id == 1) or
- (text_num > 2 and text_id == 2)):
- # lower length
- predefined_embeddings = model.encode(lower_length_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- attr[1] = lower_length_attr[lower_length_text[arg_idx]]
- changed[1] = 1
-
- if (not changed[2]) and (text_id > 2):
- # socks length
- predefined_embeddings = model.encode(socks_length_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- if similarities[0][arg_idx] > 0.7:
- attr[2] = arg_idx + 1
- changed[2] = 1
-
- if (not changed[3]) and (text_id > 2):
- # hat
- predefined_embeddings = model.encode(hat_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- if similarities[0][0] > 0.7:
- attr[3] = 1
- changed[3] = 1
-
- if (not changed[4]) and (text_id > 2):
- # glasses
- predefined_embeddings = model.encode(eyeglasses_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- if similarities[0][arg_idx] > 0.7:
- attr[4] = arg_idx + 1
- changed[4] = 1
-
- if (not changed[5]) and (text_id > 2):
- # belt
- predefined_embeddings = model.encode(belt_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- if similarities[0][arg_idx] > 0.7:
- attr[5] = arg_idx + 1
- changed[5] = 1
-
- if (not changed[6]) and (text_id == 3):
- # outer coverage
- predefined_embeddings = model.encode(outer_shape_text)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- if similarities[0][arg_idx] > 0.7:
- attr[6] = arg_idx
- changed[6] = 1
-
- if (not changed[10]) and (text_num == 2 and text_id == 1):
- # dress_types
- predefined_embeddings = model.encode(dress_types)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- similarity_skirt = util.dot_score(user_embeddings,
- model.encode(skirt_types))
- if similarities[0][0] > 0.5 and similarities[0][
- 0] > similarity_skirt[0][0]:
- attr[10] = 1
- attr[7] = 0
- attr[8] = 0
- attr[9] = 0
- attr[11] = 0
- attr[12] = 0
-
- changed[0] = 1
- changed[10] = 1
- changed[7] = 1
- changed[8] = 1
- changed[9] = 1
- changed[11] = 1
- changed[12] = 1
-
- if (not changed[12]) and (text_num == 2 and text_id == 1):
- # rompers_types
- predefined_embeddings = model.encode(rompers_types)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- max_similarity = torch.max(similarities).item()
- if max_similarity > 0.6:
- attr[12] = 1
- attr[7] = 0
- attr[8] = 0
- attr[9] = 0
- attr[10] = 0
- attr[11] = 0
-
- changed[12] = 1
- changed[7] = 1
- changed[8] = 1
- changed[9] = 1
- changed[10] = 1
- changed[11] = 1
-
- if (not changed[7]) and (text_num > 2 and text_id == 1):
- # upper_types
- predefined_embeddings = model.encode(upper_types)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- max_similarity = torch.max(similarities).item()
- if max_similarity > 0.6:
- attr[7] = 1
- changed[7] = 1
-
- if (not changed[8]) and (text_id == 3):
- # outer_types
- predefined_embeddings = model.encode(outer_types)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- arg_idx = torch.argmax(similarities).item()
- if similarities[0][arg_idx] > 0.7:
- attr[6] = outer_shape_attr[outer_shape_text[arg_idx]]
- attr[8] = 1
- changed[8] = 1
-
- if (not changed[9]) and (text_num > 2 and text_id == 2):
- # skirt_types
- predefined_embeddings = model.encode(skirt_types)
- similarity_skirt = util.dot_score(user_embeddings,
- predefined_embeddings)
- similarity_dress = util.dot_score(user_embeddings,
- model.encode(dress_types))
- if similarity_skirt[0][0] > 0.7 and similarity_skirt[0][
- 0] > similarity_dress[0][0]:
- attr[9] = 1
- attr[10] = 0
- changed[9] = 1
- changed[10] = 1
-
- if (not changed[11]) and (text_num > 2 and text_id == 2):
- # pant_types
- predefined_embeddings = model.encode(pant_types)
- similarities = util.dot_score(user_embeddings,
- predefined_embeddings)
- max_similarity = torch.max(similarities).item()
- if max_similarity > 0.6:
- attr[11] = 1
- attr[9] = 0
- attr[10] = 0
- attr[12] = 0
- changed[11] = 1
- changed[9] = 1
- changed[10] = 1
- changed[12] = 1
-
- return human_attr + attr
-
-
-def generate_texture_attributes(user_text):
- parsed_texts = user_text.split(',')
-
- attr = []
- for text in parsed_texts:
- if ('pure color' in text) or ('solid color' in text):
- attr.append(4)
- elif ('spline' in text) or ('stripe' in text):
- attr.append(3)
- elif ('plaid' in text) or ('lattice' in text):
- attr.append(5)
- elif 'floral' in text:
- attr.append(1)
- elif 'denim' in text:
- attr.append(0)
- else:
- attr.append(17)
-
- if len(attr) == 1:
- attr.append(attr[0])
- attr.append(17)
-
- if len(attr) == 2:
- attr.append(17)
-
- return attr
-
-
-if __name__ == "__main__":
- user_request = input('Enter your request: ')
- while user_request != '\\q':
- attr = generate_shape_attributes(user_request)
- print(attr)
- for attr_name, attr_value in zip(attr_names_list, attr):
- print(attr_name, attr_value)
- user_request = input('Enter your request: ')
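The attribute functions above all reduce to the same embedding-similarity step; a minimal sketch of that core (assuming `sentence-transformers` and the `all-MiniLM-L6-v2` model are available):

```python
import torch
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('all-MiniLM-L6-v2')
user = model.encode('with short sleeves')
candidates = ['sleeveless', 'short-sleeve', 'long-sleeve']
scores = util.dot_score(user, model.encode(candidates))  # shape (1, 3)
best = torch.argmax(scores).item()
print(candidates[best])  # expected: 'short-sleeve'
```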
diff --git a/spaces/Sadashiv/BERT-NER/app.py b/spaces/Sadashiv/BERT-NER/app.py
deleted file mode 100644
index 2982da0698f377884f120527b37d4414604c1a1d..0000000000000000000000000000000000000000
--- a/spaces/Sadashiv/BERT-NER/app.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import streamlit as st
-from annotated_text import annotated_text
-from utils import ner_extraction
-
-st.title('NER with Fine-Tuned BERT model')
-
-input_text = st.text_area("Please enter the text...")
-
-# use a distinct name so the imported ner_extraction class is not shadowed
-extractor = ner_extraction(input_text)
-
-if st.button('Submit'):
-    annotated_text(extractor.entity_position())
diff --git a/spaces/Salesforce/EDICT/my_half_diffusers/models/resnet.py b/spaces/Salesforce/EDICT/my_half_diffusers/models/resnet.py
deleted file mode 100644
index 0439aff823242b9e9f9e504db6fbd69702f190cc..0000000000000000000000000000000000000000
--- a/spaces/Salesforce/EDICT/my_half_diffusers/models/resnet.py
+++ /dev/null
@@ -1,483 +0,0 @@
-from functools import partial
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class Upsample2D(nn.Module):
- """
- An upsampling layer with an optional convolution.
-
-    :param channels: channels in the inputs and outputs.
-    :param use_conv: a bool determining if a convolution is applied.
-    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
-        upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_conv_transpose = use_conv_transpose
- self.name = name
-
- conv = None
- if use_conv_transpose:
- conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)
- elif use_conv:
- conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=1)
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if name == "conv":
- self.conv = conv
- else:
- self.Conv2d_0 = conv
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.use_conv_transpose:
- return self.conv(x)
-
- x = F.interpolate(x, scale_factor=2.0, mode="nearest")
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if self.use_conv:
- if self.name == "conv":
- x = self.conv(x)
- else:
- x = self.Conv2d_0(x)
-
- return x
-
-
-class Downsample2D(nn.Module):
- """
- A downsampling layer with an optional convolution.
-
-    :param channels: channels in the inputs and outputs.
-    :param use_conv: a bool determining if a convolution is applied.
-    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
-        downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name="conv"):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.padding = padding
- stride = 2
- self.name = name
-
- if use_conv:
- conv = nn.Conv2d(self.channels, self.out_channels, 3, stride=stride, padding=padding)
- else:
- assert self.channels == self.out_channels
- conv = nn.AvgPool2d(kernel_size=stride, stride=stride)
-
- # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed
- if name == "conv":
- self.Conv2d_0 = conv
- self.conv = conv
- elif name == "Conv2d_0":
- self.conv = conv
- else:
- self.conv = conv
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.use_conv and self.padding == 0:
- pad = (0, 1, 0, 1)
- x = F.pad(x, pad, mode="constant", value=0)
-
- assert x.shape[1] == self.channels
- x = self.conv(x)
-
- return x
-
-
-class FirUpsample2D(nn.Module):
- def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)):
- super().__init__()
- out_channels = out_channels if out_channels else channels
- if use_conv:
- self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
- self.use_conv = use_conv
- self.fir_kernel = fir_kernel
- self.out_channels = out_channels
-
- def _upsample_2d(self, x, weight=None, kernel=None, factor=2, gain=1):
- """Fused `upsample_2d()` followed by `Conv2d()`.
-
- Args:
- Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
- efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary:
- order.
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
- C]`.
- weight: Weight tensor of the shape `[filterH, filterW, inChannels,
- outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
- (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.
- factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as
- `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
-
- # Setup filter kernel.
- if kernel is None:
- kernel = [1] * factor
-
- # setup kernel
- kernel = np.asarray(kernel, dtype=np.float16)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * (gain * (factor**2))
-
- if self.use_conv:
- convH = weight.shape[2]
- convW = weight.shape[3]
- inC = weight.shape[1]
-
- p = (kernel.shape[0] - factor) - (convW - 1)
-
-            # Determine data dimensions.
-            stride = (factor, factor)
- output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)
- output_padding = (
- output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,
- output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,
- )
- assert output_padding[0] >= 0 and output_padding[1] >= 0
- num_groups = x.shape[1] // inC
-
- # Transpose weights.
- weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))
- weight = weight[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)
- weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))
-
- x = F.conv_transpose2d(x, weight, stride=stride, output_padding=output_padding, padding=0)
-
- x = upfirdn2d_native(x, torch.tensor(kernel, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))
- else:
- p = kernel.shape[0] - factor
- x = upfirdn2d_native(
- x, torch.tensor(kernel, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)
- )
-
- return x
-
- def forward(self, x):
- if self.use_conv:
- height = self._upsample_2d(x, self.Conv2d_0.weight, kernel=self.fir_kernel)
- height = height + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
- else:
- height = self._upsample_2d(x, kernel=self.fir_kernel, factor=2)
-
- return height
-
-
-class FirDownsample2D(nn.Module):
- def __init__(self, channels=None, out_channels=None, use_conv=False, fir_kernel=(1, 3, 3, 1)):
- super().__init__()
- out_channels = out_channels if out_channels else channels
- if use_conv:
- self.Conv2d_0 = nn.Conv2d(channels, out_channels, kernel_size=3, stride=1, padding=1)
- self.fir_kernel = fir_kernel
- self.use_conv = use_conv
- self.out_channels = out_channels
-
- def _downsample_2d(self, x, weight=None, kernel=None, factor=2, gain=1):
- """Fused `Conv2d()` followed by `downsample_2d()`.
-
- Args:
- Padding is performed only once at the beginning, not between the operations. The fused op is considerably more
- efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary:
- order.
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH,
- filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] //
- numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] *
- factor`, which corresponds to average pooling. factor: Integer downsampling factor (default: 2). gain:
- Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same
- datatype as `x`.
- """
-
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- # setup kernel
- kernel = np.asarray(kernel, dtype=np.float16)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * gain
-
- if self.use_conv:
- _, _, convH, convW = weight.shape
- p = (kernel.shape[0] - factor) + (convW - 1)
- s = [factor, factor]
- x = upfirdn2d_native(x, torch.tensor(kernel, device=x.device), pad=((p + 1) // 2, p // 2))
- x = F.conv2d(x, weight, stride=s, padding=0)
- else:
- p = kernel.shape[0] - factor
- x = upfirdn2d_native(x, torch.tensor(kernel, device=x.device), down=factor, pad=((p + 1) // 2, p // 2))
-
- return x
-
- def forward(self, x):
- if self.use_conv:
- x = self._downsample_2d(x, weight=self.Conv2d_0.weight, kernel=self.fir_kernel)
- x = x + self.Conv2d_0.bias.reshape(1, -1, 1, 1)
- else:
- x = self._downsample_2d(x, kernel=self.fir_kernel, factor=2)
-
- return x
-
-
-class ResnetBlock2D(nn.Module):
- def __init__(
- self,
- *,
- in_channels,
- out_channels=None,
- conv_shortcut=False,
- dropout=0.0,
- temb_channels=512,
- groups=32,
- groups_out=None,
- pre_norm=True,
- eps=1e-6,
- non_linearity="swish",
- time_embedding_norm="default",
- kernel=None,
- output_scale_factor=1.0,
- use_nin_shortcut=None,
- up=False,
- down=False,
- ):
- super().__init__()
-        self.pre_norm = True  # pre_norm is always enabled in this implementation
- self.in_channels = in_channels
- out_channels = in_channels if out_channels is None else out_channels
- self.out_channels = out_channels
- self.use_conv_shortcut = conv_shortcut
- self.time_embedding_norm = time_embedding_norm
- self.up = up
- self.down = down
- self.output_scale_factor = output_scale_factor
-
- if groups_out is None:
- groups_out = groups
-
- self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
-
- self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if temb_channels is not None:
- self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels)
- else:
- self.time_emb_proj = None
-
- self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)
- self.dropout = torch.nn.Dropout(dropout)
- self.conv2 = torch.nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
-
- if non_linearity == "swish":
- self.nonlinearity = lambda x: F.silu(x)
- elif non_linearity == "mish":
- self.nonlinearity = Mish()
- elif non_linearity == "silu":
- self.nonlinearity = nn.SiLU()
-
- self.upsample = self.downsample = None
- if self.up:
- if kernel == "fir":
- fir_kernel = (1, 3, 3, 1)
- self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)
- elif kernel == "sde_vp":
- self.upsample = partial(F.interpolate, scale_factor=2.0, mode="nearest")
- else:
- self.upsample = Upsample2D(in_channels, use_conv=False)
- elif self.down:
- if kernel == "fir":
- fir_kernel = (1, 3, 3, 1)
- self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)
- elif kernel == "sde_vp":
- self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)
- else:
- self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name="op")
-
- self.use_nin_shortcut = self.in_channels != self.out_channels if use_nin_shortcut is None else use_nin_shortcut
-
- self.conv_shortcut = None
- if self.use_nin_shortcut:
- self.conv_shortcut = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
-
- def forward(self, x, temb):
- hidden_states = x
-
-        # cast the normalized output back to the incoming dtype
-        # (fp16 in this half-precision fork)
-        hidden_states = self.norm1(hidden_states).type(hidden_states.dtype)
- hidden_states = self.nonlinearity(hidden_states)
-
- if self.upsample is not None:
- x = self.upsample(x)
- hidden_states = self.upsample(hidden_states)
- elif self.downsample is not None:
- x = self.downsample(x)
- hidden_states = self.downsample(hidden_states)
-
- hidden_states = self.conv1(hidden_states)
-
- if temb is not None:
- temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
- hidden_states = hidden_states + temb
-
-        # cast the normalized output back to the incoming dtype
-        # (fp16 in this half-precision fork)
-        hidden_states = self.norm2(hidden_states).type(hidden_states.dtype)
- hidden_states = self.nonlinearity(hidden_states)
-
- hidden_states = self.dropout(hidden_states)
- hidden_states = self.conv2(hidden_states)
-
- if self.conv_shortcut is not None:
- x = self.conv_shortcut(x)
-
- out = (x + hidden_states) / self.output_scale_factor
-
- return out
-
-
-class Mish(torch.nn.Module):
- def forward(self, x):
- return x * torch.tanh(torch.nn.functional.softplus(x))
-
-
-def upsample_2d(x, kernel=None, factor=2, gain=1):
- r"""Upsample2D a batch of 2D images with the given filter.
-
- Args:
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given
- filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified
- `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a:
- multiple of the upsampling factor.
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
- C]`.
- k: FIR filter of the shape `[firH, firW]` or `[firN]`
- (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.
- factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H * factor, W * factor]`
- """
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- kernel = np.asarray(kernel, dtype=np.float16)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * (gain * (factor**2))
- p = kernel.shape[0] - factor
- return upfirdn2d_native(
- x, torch.tensor(kernel, device=x.device), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)
- )
-
-
-def downsample_2d(x, kernel=None, factor=2, gain=1):
- r"""Downsample2D a batch of 2D images with the given filter.
-
- Args:
- Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the
- given filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the
- specified `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its
- shape is a multiple of the downsampling factor.
- x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,
- C]`.
- kernel: FIR filter of the shape `[firH, firW]` or `[firN]`
- (separable). The default is `[1] * factor`, which corresponds to average pooling.
- factor: Integer downsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0).
-
- Returns:
- Tensor of the shape `[N, C, H // factor, W // factor]`
- """
-
- assert isinstance(factor, int) and factor >= 1
- if kernel is None:
- kernel = [1] * factor
-
- kernel = np.asarray(kernel, dtype=np.float16)
- if kernel.ndim == 1:
- kernel = np.outer(kernel, kernel)
- kernel /= np.sum(kernel)
-
- kernel = kernel * gain
- p = kernel.shape[0] - factor
- return upfirdn2d_native(x, torch.tensor(kernel, device=x.device), down=factor, pad=((p + 1) // 2, p // 2))
-
-
-def upfirdn2d_native(input, kernel, up=1, down=1, pad=(0, 0)):
- up_x = up_y = up
- down_x = down_y = down
- pad_x0 = pad_y0 = pad[0]
- pad_x1 = pad_y1 = pad[1]
-
- _, channel, in_h, in_w = input.shape
- input = input.reshape(-1, in_h, in_w, 1)
-
- _, in_h, in_w, minor = input.shape
- kernel_h, kernel_w = kernel.shape
-
- out = input.view(-1, in_h, 1, in_w, 1, minor)
-
- # Temporary workaround for mps specific issue: https://github.com/pytorch/pytorch/issues/84535
- if input.device.type == "mps":
- out = out.to("cpu")
- out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
- out = out.view(-1, in_h * up_y, in_w * up_x, minor)
-
- out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)])
- out = out.to(input.device) # Move back to mps if necessary
- out = out[
- :,
- max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
- max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
- :,
- ]
-
- out = out.permute(0, 3, 1, 2)
- out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
- w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
- out = F.conv2d(out, w)
- out = out.reshape(
- -1,
- minor,
- in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
- in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
- )
- out = out.permute(0, 2, 3, 1)
- out = out[:, ::down_y, ::down_x, :]
-
- out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
- out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
-
- return out.view(-1, channel, out_h, out_w)
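A quick shape check for the two plain resampling blocks defined at the top of this file; a sketch that assumes `Upsample2D` and `Downsample2D` are importable from the module above:

```python
# Shape sanity check (sketch): nearest-neighbor x2 upsample + 3x3 conv,
# and a stride-2 3x3 conv downsample.
import torch

up = Upsample2D(channels=8, use_conv=True)
down = Downsample2D(channels=8, use_conv=True, padding=1)

x = torch.randn(2, 8, 16, 16)
assert up(x).shape == (2, 8, 32, 32)
assert down(x).shape == (2, 8, 8, 8)
```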
diff --git a/spaces/Senpaisora6/dreambooth-training/README.md b/spaces/Senpaisora6/dreambooth-training/README.md
deleted file mode 100644
index 2815830608092d6c5226e14cbf4947900f1f316d..0000000000000000000000000000000000000000
--- a/spaces/Senpaisora6/dreambooth-training/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Dreambooth Training
-emoji: 🌖
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.10.1
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: multimodalart/dreambooth-training
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SmallSpider/DeepDanbooru_string/README.md b/spaces/SmallSpider/DeepDanbooru_string/README.md
deleted file mode 100644
index 4330b6f969246dc764a34ea254d2e807159f1c55..0000000000000000000000000000000000000000
--- a/spaces/SmallSpider/DeepDanbooru_string/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: DeepDanbooru String
-emoji: 💬
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
-duplicated_from: NoCrypt/DeepDanbooru_string
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version`: _string_
-Only applicable for the `streamlit` and `gradio` SDKs.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/Snb-ai/gpt2/app.py b/spaces/Snb-ai/gpt2/app.py
deleted file mode 100644
index 4205e03f91904065e1610f7e6c7b2f1de1771184..0000000000000000000000000000000000000000
--- a/spaces/Snb-ai/gpt2/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/gpt2").launch()
\ No newline at end of file
diff --git a/spaces/Soumahara/stablediffusionapi-anything-v5/README.md b/spaces/Soumahara/stablediffusionapi-anything-v5/README.md
deleted file mode 100644
index 8acd9db631365292227fb5ee8c4cc12b08f44ef5..0000000000000000000000000000000000000000
--- a/spaces/Soumahara/stablediffusionapi-anything-v5/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stablediffusionapi Anything V5
-emoji: 🏆
-colorFrom: indigo
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/__init__.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Supawich/hololive_AI_fan_art_classifier/README.md b/spaces/Supawich/hololive_AI_fan_art_classifier/README.md
deleted file mode 100644
index 085f1cc8dfc1e288c4e4ef489f5b2ba78afb7562..0000000000000000000000000000000000000000
--- a/spaces/Supawich/hololive_AI_fan_art_classifier/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Hololive AI Fan Art Classifier
-emoji: 🌍
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
-license: unknown
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
deleted file mode 100644
index a3941e27874993418b3b5708d5a7485f175ff9c8..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .registry import CONV_LAYERS
-
-
-def conv_ws_2d(input,
- weight,
- bias=None,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- eps=1e-5):
- c_in = weight.size(0)
- weight_flat = weight.view(c_in, -1)
- mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
- std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
- weight = (weight - mean) / (std + eps)
- return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
-
-
-@CONV_LAYERS.register_module('ConvWS')
-class ConvWS2d(nn.Conv2d):
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True,
- eps=1e-5):
- super(ConvWS2d, self).__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- bias=bias)
- self.eps = eps
-
- def forward(self, x):
- return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,
- self.dilation, self.groups, self.eps)
-
-
-@CONV_LAYERS.register_module(name='ConvAWS')
-class ConvAWS2d(nn.Conv2d):
- """AWS (Adaptive Weight Standardization)
-
- This is a variant of Weight Standardization
- (https://arxiv.org/pdf/1903.10520.pdf)
- It is used in DetectoRS to avoid NaN
- (https://arxiv.org/pdf/2006.02334.pdf)
-
- Args:
- in_channels (int): Number of channels in the input image
- out_channels (int): Number of channels produced by the convolution
- kernel_size (int or tuple): Size of the conv kernel
- stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- dilation (int or tuple, optional): Spacing between kernel elements.
- Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If set True, adds a learnable bias to the
- output. Default: True
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True):
- super().__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- bias=bias)
- self.register_buffer('weight_gamma',
- torch.ones(self.out_channels, 1, 1, 1))
- self.register_buffer('weight_beta',
- torch.zeros(self.out_channels, 1, 1, 1))
-
- def _get_weight(self, weight):
- weight_flat = weight.view(weight.size(0), -1)
- mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
- std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
- weight = (weight - mean) / std
- weight = self.weight_gamma * weight + self.weight_beta
- return weight
-
- def forward(self, x):
- weight = self._get_weight(self.weight)
- return F.conv2d(x, weight, self.bias, self.stride, self.padding,
- self.dilation, self.groups)
-
- def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
- missing_keys, unexpected_keys, error_msgs):
- """Override default load function.
-
- AWS overrides the function _load_from_state_dict to recover
- weight_gamma and weight_beta if they are missing. If weight_gamma and
- weight_beta are found in the checkpoint, this function will return
- after super()._load_from_state_dict. Otherwise, it will compute the
- mean and std of the pretrained weights and store them in weight_beta
- and weight_gamma.
- """
-
- self.weight_gamma.data.fill_(-1)
- local_missing_keys = []
- super()._load_from_state_dict(state_dict, prefix, local_metadata,
- strict, local_missing_keys,
- unexpected_keys, error_msgs)
- if self.weight_gamma.data.mean() > 0:
- for k in local_missing_keys:
- missing_keys.append(k)
- return
- weight = self.weight.data
- weight_flat = weight.view(weight.size(0), -1)
- mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
- std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
- self.weight_beta.data.copy_(mean)
- self.weight_gamma.data.copy_(std)
- missing_gamma_beta = [
- k for k in local_missing_keys
- if k.endswith('weight_gamma') or k.endswith('weight_beta')
- ]
- for k in missing_gamma_beta:
- local_missing_keys.remove(k)
- for k in local_missing_keys:
- missing_keys.append(k)
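The core of `conv_ws_2d` is per-filter weight standardization before the convolution. A small sketch that reproduces the same arithmetic with plain PyTorch and checks that each flattened filter ends up with roughly zero mean and unit standard deviation:

```python
# Sketch: replicate conv_ws_2d's weight standardization and verify it.
import torch

conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)
w = conv.weight
w_flat = w.view(w.size(0), -1)
mean = w_flat.mean(dim=1, keepdim=True).view(-1, 1, 1, 1)
std = w_flat.std(dim=1, keepdim=True).view(-1, 1, 1, 1)
w_std = (w - mean) / (std + 1e-5)

flat = w_std.view(w_std.size(0), -1)
print(flat.mean(dim=1).abs().max().item())  # ~0
print(flat.std(dim=1))                      # ~1 per filter
```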
diff --git a/spaces/TabPFN/TabPFNEvaluation/TabPFN/differentiable_pfn_evaluation.py b/spaces/TabPFN/TabPFNEvaluation/TabPFN/differentiable_pfn_evaluation.py
deleted file mode 100644
index a4ff651b036318fcab7f989bb8d1ce89310c7cf8..0000000000000000000000000000000000000000
--- a/spaces/TabPFN/TabPFNEvaluation/TabPFN/differentiable_pfn_evaluation.py
+++ /dev/null
@@ -1,345 +0,0 @@
-import os
-import torch
-import numpy as np
-import time
-import pickle
-from scripts import tabular_metrics
-from scripts.tabular_metrics import calculate_score_per_method
-from scripts.tabular_evaluation import evaluate
-from priors.differentiable_prior import draw_random_style
-from tqdm import tqdm
-import random
-from scripts.transformer_prediction_interface import get_params_from_config, load_model_workflow
-
-"""
-===============================
-PUBLIC FUNCTIONS FOR EVALUATION
-===============================
-"""
-
-
-def eval_model_range(i_range, *args, **kwargs):
- for i in i_range:
- eval_model(i, *args, **kwargs)
-
-
-
-def eval_model(i, e, valid_datasets, test_datasets, train_datasets, eval_positions_valid, eval_positions_test,
- bptt_valid,
- bptt_test, add_name, base_path, device='cpu', eval_addition='', **extra_tuning_args):
- """
-    Differentiable model evaluation workflow. Evaluates and saves results to disk.
-
- :param i:
- :param e:
- :param valid_datasets:
- :param test_datasets:
- :param train_datasets:
- :param eval_positions_valid:
- :param eval_positions_test:
- :param bptt_valid:
- :param bptt_test:
- :param add_name:
- :param base_path:
- :param device:
- :param eval_addition:
- :param extra_tuning_args:
- :return:
- """
- model, c, results_file = load_model_workflow(i, e, add_name, base_path, device, eval_addition)
- params = {'bptt': bptt_valid
- , 'bptt_final': bptt_test
- , 'eval_positions': eval_positions_valid
- , 'eval_positions_test': eval_positions_test
- , 'valid_datasets': valid_datasets
- , 'test_datasets': test_datasets
- , 'train_datasets': train_datasets
- , 'verbose': True
- , 'device': device
- }
-
- params.update(get_params_from_config(c))
-
- start = time.time()
- metrics, metrics_valid, style, temperature, optimization_route = evaluate_differentiable_model(model, **params,
- **extra_tuning_args)
- print('Evaluation time: ', time.time() - start)
-
- print(results_file)
- r = [c.copy(), metrics, metrics_valid, style.to('cpu'), temperature.to('cpu'), optimization_route]
- with open(results_file, 'wb') as output:
- del r[0]['num_features_used']
- del r[0]['categorical_features_sampler']
- pickle.dump(r, output)
-
- _, _, _, style, temperature, _ = r
-
- return r, model
-
-"""
-===============================
-INTERNAL HELPER FUNCTIONS
-===============================
-"""
-
-def evaluate_differentiable_model(model
- , valid_datasets
- , test_datasets
- , train_datasets
- , N_draws=100
- , N_grad_steps=10
- , eval_positions=None
- , eval_positions_test=None
- , bptt=100
- , bptt_final=200
- , style=None
- , n_parallel_configurations=1
- , device='cpu'
- , selection_metric='auc'
- , final_splits=[1, 2, 3, 4, 5]
- , N_ensemble_configurations_list=[1, 5, 10, 20, 50, 100]
- , **kwargs):
- """
- Evaluation function for diffable model evaluation. Returns a list of results.
-
- :param model:
- :param valid_datasets:
- :param test_datasets:
- :param train_datasets:
- :param N_draws:
- :param N_grad_steps:
- :param eval_positions:
- :param eval_positions_test:
- :param bptt:
- :param bptt_final:
- :param style:
- :param n_parallel_configurations:
- :param device:
- :param selection_metric:
- :param final_splits:
- :param N_ensemble_configurations_list:
- :param kwargs:
- :return:
- """
- torch.manual_seed(0)
- np.random.seed(0)
- random.seed(0)
-
- diffable_metric = tabular_metrics.cross_entropy
- evaluation_metric = tabular_metrics.auc_metric
- if selection_metric in ('auc', 'roc'):
- selection_metric_min_max = 'max'
- selection_metric = tabular_metrics.auc_metric
- evaluation_metric = selection_metric
- elif selection_metric in ('ce', 'selection_metric'):
- selection_metric_min_max = 'min'
- selection_metric = tabular_metrics.cross_entropy
- evaluation_metric = selection_metric
-
- print('Diffable metric', diffable_metric, ' Selection metric', selection_metric, ' Evaluation metric',
- evaluation_metric)
- print('N PARALLEL CONFIGURATIONS', n_parallel_configurations)
- print('eval_positions', eval_positions)
-
- def evaluate_valid(style, softmax_temperature, results, results_tracked):
- result_valid = eval_step(valid_datasets, style, softmax_temperature=softmax_temperature,
- return_tensor=False, inference_mode=True, selection_metric=selection_metric,
- evaluation_metric=evaluation_metric, eval_positions=eval_positions, bptt=bptt, model=model[2])
- result_valid = [float(result_valid[f'mean_select_at_{pos}']) for pos in eval_positions]
- results += [result_valid]
- results_tracked += [np.nanmean(result_valid)]
-
- model[2].to(device)
- model[2].eval()
-
- results_on_valid, results_on_valid_tracked = [], []
- best_style, best_softmax_temperature = style, torch.cat(
- [torch.tensor([0.0]).to(device) for n in range(0, n_parallel_configurations)], 0)
- optimization_routes = []
-
- best_style = torch.cat([draw_random_style(model[3], device).detach() for n in range(0, n_parallel_configurations)],
- 0)
- best_softmax_temperature = torch.cat([torch.tensor([0.0]).to(device) for n in range(0, n_parallel_configurations)],
- 0)
-
-
- for _ in tqdm(range(0, N_draws), desc='Iterate over Optimization initializations'): # Evaluates N hparam draws
- style = torch.cat([draw_random_style(model[3], device).detach() for n in range(0, n_parallel_configurations)],
- 0)
- softmax_temperature = torch.cat([torch.tensor([0.0]).to(device) for n in range(0, n_parallel_configurations)],
- 0)
-
- evaluate_valid(style, softmax_temperature, results_on_valid, results_on_valid_tracked)
-
- print(f'Draw --> Valid Selection metric: {results_on_valid[-1]}')
-
- if N_grad_steps > 0:
- gradient_optimize_result = gradient_optimize_style(model, style, N_grad_steps
- , softmax_temperature=softmax_temperature
- , model=model[2]
- , train_datasets=train_datasets
- , valid_datasets=valid_datasets
- , selection_metric_min_max=selection_metric_min_max
- , **kwargs)
- optimization_routes += [gradient_optimize_result['optimization_route']]
-
- evaluate_valid(gradient_optimize_result['best_style']
- , gradient_optimize_result['best_temperature']
- , results_on_valid, results_on_valid_tracked)
-
- print(f'After diff --> Valid Selection metric: {results_on_valid[-1]}')
-
- if selection_metric_min_max == 'min':
- is_best = (results_on_valid_tracked[-1] <= min(results_on_valid_tracked))
- else:
- is_best = (results_on_valid_tracked[-1] >= max(results_on_valid_tracked))
-
- if is_best or best_style is None:
- best_style = gradient_optimize_result['best_style'].clone()
- best_softmax_temperature = gradient_optimize_result['best_temperature'].clone()
- torch.cuda.empty_cache()
-
- def final_evaluation():
- print('Running eval dataset with final params (no gradients)..')
- print(best_style, best_softmax_temperature)
- result_test = []
- for N_ensemble_configurations in N_ensemble_configurations_list:
- print(f'Running with {N_ensemble_configurations} ensemble_configurations')
- kwargs['N_ensemble_configurations'] = N_ensemble_configurations
- splits = []
- for split in final_splits:
- splits += [eval_step(test_datasets, best_style, softmax_temperature=best_softmax_temperature
- , return_tensor=False, eval_positions=eval_positions_test,
- bptt=bptt_final, inference_mode=True, split_number=split, model=model[2]
- , selection_metric=selection_metric, evaluation_metric=evaluation_metric)]
- result_test += [splits]
-
- print('Running valid dataset with final params (no gradients)..')
- result_valid = eval_step(valid_datasets, best_style, softmax_temperature=best_softmax_temperature
- , return_tensor=False, eval_positions=eval_positions_test,
- bptt=bptt_final, inference_mode=True, model=model[2]
- , selection_metric=selection_metric, evaluation_metric=evaluation_metric)
-
- return result_test, result_valid
-
- result_test, result_valid = final_evaluation()
-
- return result_test, result_valid, best_style, best_softmax_temperature, optimization_routes
-
-
-def eval_step(ds, used_style, selection_metric, evaluation_metric, eval_positions, return_tensor=True, **kwargs):
- def step():
- return evaluate(datasets=ds,
- method='transformer'
- , overwrite=True
- , style=used_style
- , eval_positions=eval_positions
- , metric_used=selection_metric
- , save=False
- , path_interfix=None
- , base_path=None
- , verbose=True
- , **kwargs)
-
- if return_tensor:
- r = step()
- else:
- with torch.no_grad():
- r = step()
-
- calculate_score_per_method(selection_metric, 'select', r, ds, eval_positions, aggregator='mean')
- calculate_score_per_method(evaluation_metric, 'eval', r, ds, eval_positions, aggregator='mean')
-
- return r
-
-
-def gradient_optimize_style(model, init_style, steps, softmax_temperature, train_datasets, valid_datasets, learning_rate=0.03, optimize_all=False,
- limit_style=True, N_datasets_sampled=90, optimize_softmax_temperature=True, selection_metric_min_max='max', **kwargs):
- """
- Uses gradient based methods to optimize 'style' on the 'train_datasets' and uses stopping with 'valid_datasets'.
-
- :param model:
- :param init_style:
- :param steps:
- :param learning_rate:
- :param softmax_temperature:
- :param train_datasets:
- :param valid_datasets:
- :param optimize_all:
- :param limit_style:
- :param N_datasets_sampled:
- :param optimize_softmax_temperature:
- :param selection_metric_min_max:
- :param kwargs:
- :return:
- """
- grad_style = torch.nn.Parameter(init_style.detach(), requires_grad=True)
-
- best_style, best_temperature, best_selection_metric, best_diffable_metric = grad_style.detach(), softmax_temperature.detach(), None, None
- softmax_temperature = torch.nn.Parameter(softmax_temperature.detach(), requires_grad=optimize_softmax_temperature)
- variables_to_optimize = model[2].parameters() if optimize_all else [grad_style, softmax_temperature]
- optimizer = torch.optim.Adam(variables_to_optimize, lr=learning_rate)
-
- optimization_route_selection, optimization_route_diffable = [], []
- optimization_route_selection_valid, optimization_route_diffable_valid = [], []
-
- def eval_opt(ds, return_tensor=True, inference_mode=False):
- result = eval_step(ds, grad_style, softmax_temperature=softmax_temperature, return_tensor=return_tensor
- , inference_mode=inference_mode, model=model[2], **kwargs)
-
- diffable_metric = result['mean_metric']
- selection_metric = result['mean_select']
-
- return diffable_metric, selection_metric
-
- def eval_all_datasets(datasets, propagate=True):
- selection_metrics_this_step, diffable_metrics_this_step = [], []
- for ds in datasets:
- diffable_metric_train, selection_metric_train = eval_opt([ds], inference_mode=(not propagate))
- if not torch.isnan(diffable_metric_train).any():
- if propagate and diffable_metric_train.requires_grad == True:
- diffable_metric_train.backward()
- selection_metrics_this_step += [selection_metric_train]
- diffable_metrics_this_step += [float(diffable_metric_train.detach().cpu().numpy())]
- diffable_metric_train = np.nanmean(diffable_metrics_this_step)
- selection_metric_train = np.nanmean(selection_metrics_this_step)
-
- return diffable_metric_train, selection_metric_train
-
- for t in tqdm(range(steps), desc='Iterate over Optimization steps'):
- optimizer.zero_grad()
-
- # Select subset of datasets
- random.seed(t)
- train_datasets_ = random.sample(train_datasets, N_datasets_sampled)
-
- # Get score on train
- diffable_metric_train, selection_metric_train = eval_all_datasets(train_datasets_, propagate=True)
- optimization_route_selection += [float(selection_metric_train)]
- optimization_route_diffable += [float(diffable_metric_train)]
-
- # Get score on valid
- diffable_metric_valid, selection_metric_valid = eval_all_datasets(valid_datasets, propagate=False)
- optimization_route_selection_valid += [float(selection_metric_valid)]
- optimization_route_diffable_valid += [float(diffable_metric_valid)]
-
-        is_best = best_selection_metric is not None and (
-            (selection_metric_min_max == 'min' and best_selection_metric > selection_metric_valid)
-            or (selection_metric_min_max == 'max' and best_selection_metric < selection_metric_valid))
-        if (best_selection_metric is None) or (not np.isnan(selection_metric_valid) and is_best):
- print('New best', best_selection_metric, selection_metric_valid)
- best_style = grad_style.detach().clone()
- best_temperature = softmax_temperature.detach().clone()
- best_selection_metric, best_diffable_metric = selection_metric_valid, diffable_metric_valid
-
- optimizer.step()
-
- if limit_style:
- grad_style = grad_style.detach().clamp(-1.74, 1.74)
-
-        print(f'Valid: Diffable metric={diffable_metric_valid} Selection metric={selection_metric_valid}; ' +
-              f'Train: Diffable metric={diffable_metric_train} Selection metric={selection_metric_train}')
-
- print(f'Return best:{best_style} {best_selection_metric}')
- return {'best_style': best_style, 'best_temperature': best_temperature
- , 'optimization_route': {'select': optimization_route_selection, 'loss': optimization_route_diffable,
- 'test_select': optimization_route_selection_valid, 'test_loss': optimization_route_diffable_valid}}
\ No newline at end of file
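Stripped of the dataset plumbing, `gradient_optimize_style` is gradient descent on a small tensor with best-so-far tracking and a clamp after each step. A toy sketch of that loop (the quadratic stand-in loss and sizes are illustrative; clamping in place keeps the optimizer bound to the same parameter, whereas the original reassigns a detached copy):

```python
# Toy sketch of the style-optimization loop above; the loss is a stand-in.
import torch

style = torch.nn.Parameter(torch.zeros(1, 4))
optimizer = torch.optim.Adam([style], lr=0.03)

def valid_loss(s):  # stand-in for eval_step on the validation datasets
    return ((s - 0.5) ** 2).mean()

best_style, best_metric = style.detach().clone(), float("inf")
for _ in range(50):
    optimizer.zero_grad()
    loss = valid_loss(style)
    loss.backward()
    optimizer.step()
    with torch.no_grad():
        style.clamp_(-1.74, 1.74)  # same bound as limit_style above
    if loss.item() < best_metric:
        best_metric, best_style = loss.item(), style.detach().clone()

print(best_metric, best_style)
```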
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
deleted file mode 100644
index 4266b5ee92a24b5e0ef65689a1b94a98bb4a9b56..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/cachecontrol/_cmd.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# SPDX-FileCopyrightText: 2015 Eric Larson
-#
-# SPDX-License-Identifier: Apache-2.0
-
-import logging
-
-from pip._vendor import requests
-
-from pip._vendor.cachecontrol.adapter import CacheControlAdapter
-from pip._vendor.cachecontrol.cache import DictCache
-from pip._vendor.cachecontrol.controller import logger
-
-from argparse import ArgumentParser
-
-
-def setup_logging():
- logger.setLevel(logging.DEBUG)
- handler = logging.StreamHandler()
- logger.addHandler(handler)
-
-
-def get_session():
- adapter = CacheControlAdapter(
- DictCache(), cache_etags=True, serializer=None, heuristic=None
- )
- sess = requests.Session()
- sess.mount("http://", adapter)
- sess.mount("https://", adapter)
-
- sess.cache_controller = adapter.controller
- return sess
-
-
-def get_args():
- parser = ArgumentParser()
- parser.add_argument("url", help="The URL to try and cache")
- return parser.parse_args()
-
-
-def main(args=None):
- args = get_args()
- sess = get_session()
-
- # Make a request to get a response
- resp = sess.get(args.url)
-
- # Turn on logging
- setup_logging()
-
- # try setting the cache
- sess.cache_controller.cache_response(resp.request, resp.raw)
-
- # Now try to get it
- if sess.cache_controller.cached_request(resp.request):
- print("Cached!")
- else:
- print("Not cached :(")
-
-
-if __name__ == "__main__":
- main()
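Outside of pip's vendored namespace, the same setup is available from the standalone `cachecontrol` package. A sketch of the typical session wrapper (whether the second request is actually served from cache depends on the response's caching headers):

```python
# Sketch using the standalone cachecontrol + requests packages.
import requests
from cachecontrol import CacheControl

sess = CacheControl(requests.Session())  # in-memory DictCache by default
r1 = sess.get("https://example.com")
r2 = sess.get("https://example.com")
print(r1.from_cache, r2.from_cache)  # typically False, then True if cacheable
```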
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_timer.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_timer.py
deleted file mode 100644
index a2ca6be03c43054caaa3660998273ebf704345dd..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/_timer.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-Timer context manager, only used in debug.
-
-"""
-
-from time import time
-
-import contextlib
-from typing import Generator
-
-
-@contextlib.contextmanager
-def timer(subject: str = "time") -> Generator[None, None, None]:
- """print the elapsed time. (only used in debugging)"""
- start = time()
- yield
- elapsed = time() - start
- elapsed_ms = elapsed * 1000
- print(f"{subject} elapsed {elapsed_ms:.1f}ms")
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/install_headers.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/install_headers.py
deleted file mode 100644
index 085272c1a2274471d6cdc9022b8a273966086bcf..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/_distutils/command/install_headers.py
+++ /dev/null
@@ -1,44 +0,0 @@
-"""distutils.command.install_headers
-
-Implements the Distutils 'install_headers' command, to install C/C++ header
-files to the Python include directory."""
-
-from ..core import Command
-
-
-# XXX force is never used
-class install_headers(Command):
- description = "install C/C++ header files"
-
- user_options = [
- ('install-dir=', 'd', "directory to install header files to"),
- ('force', 'f', "force installation (overwrite existing files)"),
- ]
-
- boolean_options = ['force']
-
- def initialize_options(self):
- self.install_dir = None
- self.force = 0
- self.outfiles = []
-
- def finalize_options(self):
- self.set_undefined_options(
- 'install', ('install_headers', 'install_dir'), ('force', 'force')
- )
-
- def run(self):
- headers = self.distribution.headers
- if not headers:
- return
-
- self.mkpath(self.install_dir)
- for header in headers:
- (out, _) = self.copy_file(header, self.install_dir)
- self.outfiles.append(out)
-
- def get_inputs(self):
- return self.distribution.headers or []
-
- def get_outputs(self):
- return self.outfiles
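This command only acts on headers a project declares via the `headers` argument to `setup()`. A minimal sketch of such a declaration (package name and path are illustrative):

```python
# setup.py sketch: declare C headers so `install_headers` copies them
# into the Python include directory at install time.
from setuptools import setup

setup(
    name="mypkg",                 # illustrative
    version="0.1",
    headers=["include/mypkg.h"],  # illustrative path
)
```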
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py
deleted file mode 100644
index 52e18da24ef1b3667376068beab0c615272eecd5..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/setuptools/config/_validate_pyproject/fastjsonschema_validations.py
+++ /dev/null
@@ -1,1052 +0,0 @@
-# noqa
-# type: ignore
-# flake8: noqa
-# pylint: skip-file
-# mypy: ignore-errors
-# yapf: disable
-# pylama:skip=1
-
-
-# *** PLEASE DO NOT MODIFY DIRECTLY: Automatically generated code ***
-
-
-VERSION = "2.16.3"
-import re
-from .fastjsonschema_exceptions import JsonSchemaValueException
-
-
-REGEX_PATTERNS = {
- '^.*$': re.compile('^.*$'),
- '.+': re.compile('.+'),
- '^.+$': re.compile('^.+$'),
- 'idn-email_re_pattern': re.compile('^[^@]+@[^@]+\\.[^@]+\\Z')
-}
-
-NoneType = type(None)
-
-def validate(data, custom_formats={}, name_prefix=None):
- validate_https___packaging_python_org_en_latest_specifications_declaring_build_dependencies(data, custom_formats, (name_prefix or "data") + "")
- return data
-
-def validate_https___packaging_python_org_en_latest_specifications_declaring_build_dependencies(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-build-dependencies/', 'title': 'Data structure for ``pyproject.toml`` files', '$$description': ['File format containing build-time configurations for the Python ecosystem. ', ':pep:`517` initially defined a build-system independent format for source trees', 'which was complemented by :pep:`518` to provide a way of specifying dependencies ', 'for building Python projects.', 'Please notice the ``project`` table (as initially defined in :pep:`621`) is not included', 'in this schema and should be considered separately.'], 'type': 'object', 'additionalProperties': False, 'properties': {'build-system': {'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': 
{'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, 'tool': {'type': 'object', 'properties': {'distutils': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, 'setuptools': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated 
function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$ref': '#/definitions/package-name'}}, {'$ref': '#/definitions/find-directive'}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$ref': '#/definitions/package-name'}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).', "By default: ``['LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*']``"], '$comment': 'TODO: revise if PEP 639 is accepted. 
Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'$ref': '#/definitions/attr-directive'}, {'$ref': '#/definitions/file-directive'}]}, 'classifiers': {'$ref': '#/definitions/file-directive'}, 'description': {'$ref': '#/definitions/file-directive'}, 'dependencies': {'$ref': '#/definitions/file-directive'}, 'entry-points': {'$ref': '#/definitions/file-directive'}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$ref': '#/definitions/file-directive'}}}, 'readme': {'anyOf': [{'$ref': '#/definitions/file-directive'}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'package-name': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, 'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}}}}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. 
MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
_core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='type')
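# --- annotation (not part of the deleted file): the generated checks below
# walk the parsed ``pyproject.toml`` dict key by key, removing each recognised
# key from a working set and rejecting whatever remains (the schema's
# ``additionalProperties: False``).  A minimal sketch of that discipline,
# assuming ``data`` is a plain dict (illustration only):
#
#     known = {"build-system", "project", "tool"}
#     leftover = set(data) - known
#     if leftover:
#         raise ValueError(f"unexpected top-level keys: {leftover}")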
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_keys = set(data.keys())
- if "build-system" in data_keys:
- data_keys.remove("build-system")
- data__buildsystem = data["build-system"]
- if not isinstance(data__buildsystem, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system must be object", value=data__buildsystem, name="" + (name_prefix or "data") + ".build-system", definition={'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, rule='type')
- data__buildsystem_is_dict = isinstance(data__buildsystem, dict)
- if data__buildsystem_is_dict:
- data__buildsystem_len = len(data__buildsystem)
- if not all(prop in data__buildsystem for prop in ['requires']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system must contain ['requires'] properties", value=data__buildsystem, name="" + (name_prefix or "data") + ".build-system", definition={'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, rule='required')
- data__buildsystem_keys = set(data__buildsystem.keys())
- if "requires" in data__buildsystem_keys:
- data__buildsystem_keys.remove("requires")
- data__buildsystem__requires = data__buildsystem["requires"]
- if not isinstance(data__buildsystem__requires, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.requires must be array", value=data__buildsystem__requires, name="" + (name_prefix or "data") + ".build-system.requires", definition={'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, rule='type')
- data__buildsystem__requires_is_list = isinstance(data__buildsystem__requires, (list, tuple))
- if data__buildsystem__requires_is_list:
- data__buildsystem__requires_len = len(data__buildsystem__requires)
- for data__buildsystem__requires_x, data__buildsystem__requires_item in enumerate(data__buildsystem__requires):
- if not isinstance(data__buildsystem__requires_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.requires[{data__buildsystem__requires_x}]".format(**locals()) + " must be string", value=data__buildsystem__requires_item, name="" + (name_prefix or "data") + ".build-system.requires[{data__buildsystem__requires_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
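# --- annotation (not part of the deleted file): at this point
# ``build-system.requires`` is known to be a list of strings.  A table that
# passes these checks (TOML, illustrative):
#
#     [build-system]
#     requires = ["setuptools>=61", "wheel"]
#
# Note the item schema above only enforces ``type: string``; PEP 508 syntax
# for this field is not validated by this branch.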
- if "build-backend" in data__buildsystem_keys:
- data__buildsystem_keys.remove("build-backend")
- data__buildsystem__buildbackend = data__buildsystem["build-backend"]
- if not isinstance(data__buildsystem__buildbackend, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.build-backend must be string", value=data__buildsystem__buildbackend, name="" + (name_prefix or "data") + ".build-system.build-backend", definition={'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, rule='type')
- if isinstance(data__buildsystem__buildbackend, str):
- if not custom_formats["pep517-backend-reference"](data__buildsystem__buildbackend):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.build-backend must be pep517-backend-reference", value=data__buildsystem__buildbackend, name="" + (name_prefix or "data") + ".build-system.build-backend", definition={'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, rule='format')
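# --- annotation (not part of the deleted file): the
# ``pep517-backend-reference`` format accepts a :pep:`517` object reference,
# i.e. ``importable.module`` or ``importable.module:object.attr``, e.g.
# (TOML, illustrative):
#
#     build-backend = "setuptools.build_meta"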
- if "backend-path" in data__buildsystem_keys:
- data__buildsystem_keys.remove("backend-path")
- data__buildsystem__backendpath = data__buildsystem["backend-path"]
- if not isinstance(data__buildsystem__backendpath, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.backend-path must be array", value=data__buildsystem__backendpath, name="" + (name_prefix or "data") + ".build-system.backend-path", definition={'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}, rule='type')
- data__buildsystem__backendpath_is_list = isinstance(data__buildsystem__backendpath, (list, tuple))
- if data__buildsystem__backendpath_is_list:
- data__buildsystem__backendpath_len = len(data__buildsystem__backendpath)
- for data__buildsystem__backendpath_x, data__buildsystem__backendpath_item in enumerate(data__buildsystem__backendpath):
- if not isinstance(data__buildsystem__backendpath_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system.backend-path[{data__buildsystem__backendpath_x}]".format(**locals()) + " must be string", value=data__buildsystem__backendpath_item, name="" + (name_prefix or "data") + ".build-system.backend-path[{data__buildsystem__backendpath_x}]".format(**locals()) + "", definition={'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}, rule='type')
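# --- annotation (not part of the deleted file): ``backend-path`` supports
# in-tree build backends (:pep:`517`); per the ``$$description`` above, each
# entry is prepended to ``sys.path`` before the backend is loaded.
# Illustrative TOML (``local_backend`` is a hypothetical module name):
#
#     [build-system]
#     requires = []
#     build-backend = "local_backend"
#     backend-path = ["."]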
- if data__buildsystem_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".build-system must not contain "+str(data__buildsystem_keys)+" properties", value=data__buildsystem, name="" + (name_prefix or "data") + ".build-system", definition={'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, rule='additionalProperties')
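# --- annotation (not part of the deleted file): because
# ``additionalProperties`` is ``False`` for the ``build-system`` table, a
# misspelled key (e.g. ``require`` instead of ``requires``) is reported by
# the check above rather than silently ignored.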
- if "project" in data_keys:
- data_keys.remove("project")
- data__project = data["project"]
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata(data__project, custom_formats, (name_prefix or "data") + ".project")
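# --- annotation (not part of the deleted file): the ``project`` table is
# delegated to the :pep:`621` validator.  Per that schema's ``if``/``then``
# clause, ``name`` must always be present and ``version`` must be given
# statically unless listed in ``dynamic``.  Illustrative TOML (``my-pkg`` is
# a placeholder name):
#
#     [project]
#     name = "my-pkg"
#     version = "0.1.0"    # or omit and set: dynamic = ["version"]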
- if "tool" in data_keys:
- data_keys.remove("tool")
- data__tool = data["tool"]
- if not isinstance(data__tool, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".tool must be object", value=data__tool, name="" + (name_prefix or "data") + ".tool", definition={'type': 'object', 'properties': {'distutils': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, 'setuptools': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$ref': '#/definitions/package-name'}}, {'$ref': '#/definitions/find-directive'}]}, 'package-dir': {'$$description': [':class:`dict`-like structure 
mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$ref': '#/definitions/package-name'}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).', "By default: ``['LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*']``"], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. 
Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'$ref': '#/definitions/attr-directive'}, {'$ref': '#/definitions/file-directive'}]}, 'classifiers': {'$ref': '#/definitions/file-directive'}, 'description': {'$ref': '#/definitions/file-directive'}, 'dependencies': {'$ref': '#/definitions/file-directive'}, 'entry-points': {'$ref': '#/definitions/file-directive'}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$ref': '#/definitions/file-directive'}}}, 'readme': {'anyOf': [{'$ref': '#/definitions/file-directive'}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'package-name': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, 'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}}}, rule='type')
- data__tool_is_dict = isinstance(data__tool, dict)
- if data__tool_is_dict:
- data__tool_keys = set(data__tool.keys())
- if "distutils" in data__tool_keys:
- data__tool_keys.remove("distutils")
- data__tool__distutils = data__tool["distutils"]
- validate_https___docs_python_org_3_install(data__tool__distutils, custom_formats, (name_prefix or "data") + ".tool.distutils")
- if "setuptools" in data__tool_keys:
- data__tool_keys.remove("setuptools")
- data__tool__setuptools = data__tool["setuptools"]
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html(data__tool__setuptools, custom_formats, (name_prefix or "data") + ".tool.setuptools")
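# --- annotation (not part of the deleted file): only ``tool.distutils`` and
# ``tool.setuptools`` get dedicated validators here; the ``tool`` schema does
# not set ``additionalProperties: False``, so other subtables (e.g.
# ``tool.black``) pass through unchecked.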
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-build-dependencies/', 'title': 'Data structure for ``pyproject.toml`` files', '$$description': ['File format containing build-time configurations for the Python ecosystem. ', ':pep:`517` initially defined a build-system independent format for source trees', 'which was complemented by :pep:`518` to provide a way of specifying dependencies ', 'for building Python projects.', 'Please notice the ``project`` table (as initially defined in :pep:`621`) is not included', 'in this schema and should be considered separately.'], 'type': 'object', 'additionalProperties': False, 'properties': {'build-system': {'type': 'object', 'description': 'Table used to store build-related data', 'additionalProperties': False, 'properties': {'requires': {'type': 'array', '$$description': ['List of dependencies in the :pep:`508` format required to execute the build', 'system. Please notice that the resulting dependency graph', '**MUST NOT contain cycles**'], 'items': {'type': 'string'}}, 'build-backend': {'type': 'string', 'description': 'Python object that will be used to perform the build according to :pep:`517`', 'format': 'pep517-backend-reference'}, 'backend-path': {'type': 'array', '$$description': ['List of directories to be prepended to ``sys.path`` when loading the', 'back-end, and running its hooks'], 'items': {'type': 'string', '$comment': 'Should be a path (TODO: enforce it with format?)'}}}, 'required': ['requires']}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). 
The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': 
{'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. 
It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, 'tool': {'type': 'object', 'properties': {'distutils': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, 'setuptools': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated 
function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$ref': '#/definitions/package-name'}}, {'$ref': '#/definitions/find-directive'}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$ref': '#/definitions/package-name'}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).', "By default: ``['LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*']``"], '$comment': 'TODO: revise if PEP 639 is accepted. 
Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'$ref': '#/definitions/attr-directive'}, {'$ref': '#/definitions/file-directive'}]}, 'classifiers': {'$ref': '#/definitions/file-directive'}, 'description': {'$ref': '#/definitions/file-directive'}, 'dependencies': {'$ref': '#/definitions/file-directive'}, 'entry-points': {'$ref': '#/definitions/file-directive'}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$ref': '#/definitions/file-directive'}}}, 'readme': {'anyOf': [{'$ref': '#/definitions/file-directive'}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'package-name': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, 'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}}}}, 'project': {'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. 
MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. 
original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$ref': '#/definitions/author'}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create command-line wrappers for the given', '`entry points `_.']}, 'gui-scripts': {'$ref': '#/definitions/entry-point-group', '$$description': ['Instruct the installer to create GUI wrappers for the given', '`entry points `_.', 'The difference between ``scripts`` and ``gui-scripts`` is only relevant in', 'Windows.']}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$ref': '#/definitions/entry-point-group'}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$ref': '#/definitions/dependency'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$ref': '#/definitions/dependency'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. 
_core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='additionalProperties')
- return data
-
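# --- annotation (not part of the deleted file): a minimal usage sketch for a
# validator generated in this style.  It assumes Python 3.11+ for ``tomllib``
# and that the module exposes a top-level ``validate`` entry point (both are
# assumptions about the surrounding, now-deleted module):
#
#     import tomllib
#
#     with open("pyproject.toml", "rb") as f:
#         data = tomllib.load(f)
#     try:
#         validate(data)  # raises JsonSchemaValueException on failure
#     except JsonSchemaValueException as exc:
#         print(f"{exc.name}: {exc.message} (rule: {exc.rule})")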
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. 
``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. 
See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).', "By default: ``['LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*']``"], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 
'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'package-name': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, 'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_keys = set(data.keys())
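-        # Each recognized property is removed from ``data_keys`` as it is validated, so
-        # whatever remains can be rejected by the ``additionalProperties`` checks that
-        # follow the same pattern throughout this function.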
- if "platforms" in data_keys:
- data_keys.remove("platforms")
- data__platforms = data["platforms"]
- if not isinstance(data__platforms, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".platforms must be array", value=data__platforms, name="" + (name_prefix or "data") + ".platforms", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__platforms_is_list = isinstance(data__platforms, (list, tuple))
- if data__platforms_is_list:
- data__platforms_len = len(data__platforms)
- for data__platforms_x, data__platforms_item in enumerate(data__platforms):
- if not isinstance(data__platforms_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".platforms[{data__platforms_x}]".format(**locals()) + " must be string", value=data__platforms_item, name="" + (name_prefix or "data") + ".platforms[{data__platforms_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
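-        # ``format`` constraints are delegated to the caller-supplied ``custom_formats``
-        # callbacks (e.g. ``pep508-identifier`` below); the type check runs first, so
-        # format validation is only attempted on strings.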
- if "provides" in data_keys:
- data_keys.remove("provides")
- data__provides = data["provides"]
- if not isinstance(data__provides, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides must be array", value=data__provides, name="" + (name_prefix or "data") + ".provides", definition={'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, rule='type')
- data__provides_is_list = isinstance(data__provides, (list, tuple))
- if data__provides_is_list:
- data__provides_len = len(data__provides)
- for data__provides_x, data__provides_item in enumerate(data__provides):
- if not isinstance(data__provides_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + " must be string", value=data__provides_item, name="" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='type')
- if isinstance(data__provides_item, str):
- if not custom_formats["pep508-identifier"](data__provides_item):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + " must be pep508-identifier", value=data__provides_item, name="" + (name_prefix or "data") + ".provides[{data__provides_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='format')
- if "obsoletes" in data_keys:
- data_keys.remove("obsoletes")
- data__obsoletes = data["obsoletes"]
- if not isinstance(data__obsoletes, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes must be array", value=data__obsoletes, name="" + (name_prefix or "data") + ".obsoletes", definition={'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, rule='type')
- data__obsoletes_is_list = isinstance(data__obsoletes, (list, tuple))
- if data__obsoletes_is_list:
- data__obsoletes_len = len(data__obsoletes)
- for data__obsoletes_x, data__obsoletes_item in enumerate(data__obsoletes):
- if not isinstance(data__obsoletes_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + " must be string", value=data__obsoletes_item, name="" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='type')
- if isinstance(data__obsoletes_item, str):
- if not custom_formats["pep508-identifier"](data__obsoletes_item):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + " must be pep508-identifier", value=data__obsoletes_item, name="" + (name_prefix or "data") + ".obsoletes[{data__obsoletes_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'pep508-identifier'}, rule='format')
- if "zip-safe" in data_keys:
- data_keys.remove("zip-safe")
- data__zipsafe = data["zip-safe"]
- if not isinstance(data__zipsafe, (bool)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".zip-safe must be boolean", value=data__zipsafe, name="" + (name_prefix or "data") + ".zip-safe", definition={'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, rule='type')
- if "script-files" in data_keys:
- data_keys.remove("script-files")
- data__scriptfiles = data["script-files"]
- if not isinstance(data__scriptfiles, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".script-files must be array", value=data__scriptfiles, name="" + (name_prefix or "data") + ".script-files", definition={'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, rule='type')
- data__scriptfiles_is_list = isinstance(data__scriptfiles, (list, tuple))
- if data__scriptfiles_is_list:
- data__scriptfiles_len = len(data__scriptfiles)
- for data__scriptfiles_x, data__scriptfiles_item in enumerate(data__scriptfiles):
- if not isinstance(data__scriptfiles_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".script-files[{data__scriptfiles_x}]".format(**locals()) + " must be string", value=data__scriptfiles_item, name="" + (name_prefix or "data") + ".script-files[{data__scriptfiles_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if "eager-resources" in data_keys:
- data_keys.remove("eager-resources")
- data__eagerresources = data["eager-resources"]
- if not isinstance(data__eagerresources, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".eager-resources must be array", value=data__eagerresources, name="" + (name_prefix or "data") + ".eager-resources", definition={'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__eagerresources_is_list = isinstance(data__eagerresources, (list, tuple))
- if data__eagerresources_is_list:
- data__eagerresources_len = len(data__eagerresources)
- for data__eagerresources_x, data__eagerresources_item in enumerate(data__eagerresources):
- if not isinstance(data__eagerresources_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".eager-resources[{data__eagerresources_x}]".format(**locals()) + " must be string", value=data__eagerresources_item, name="" + (name_prefix or "data") + ".eager-resources[{data__eagerresources_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
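-        # ``packages`` is a oneOf: either an array of package names or a single
-        # ``{find = {...}}`` directive. The oneOf is emulated by counting how many
-        # alternatives validate and requiring exactly one match.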
- if "packages" in data_keys:
- data_keys.remove("packages")
- data__packages = data["packages"]
- data__packages_one_of_count1 = 0
- if data__packages_one_of_count1 < 2:
- try:
- if not isinstance(data__packages, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages must be array", value=data__packages, name="" + (name_prefix or "data") + ".packages", definition={'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}}, rule='type')
- data__packages_is_list = isinstance(data__packages, (list, tuple))
- if data__packages_is_list:
- data__packages_len = len(data__packages)
- for data__packages_x, data__packages_item in enumerate(data__packages):
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_package_name(data__packages_item, custom_formats, (name_prefix or "data") + ".packages[{data__packages_x}]".format(**locals()))
- data__packages_one_of_count1 += 1
- except JsonSchemaValueException: pass
- if data__packages_one_of_count1 < 2:
- try:
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive(data__packages, custom_formats, (name_prefix or "data") + ".packages")
- data__packages_one_of_count1 += 1
- except JsonSchemaValueException: pass
- if data__packages_one_of_count1 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".packages must be valid exactly by one definition" + (" (" + str(data__packages_one_of_count1) + " matches found)"), value=data__packages, name="" + (name_prefix or "data") + ".packages", definition={'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, rule='oneOf')
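-        # ``package-dir`` is checked twice: values via ``patternProperties`` (must be
-        # strings) and keys via ``propertyNames`` (each key must be "" or a valid
-        # package name).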
- if "package-dir" in data_keys:
- data_keys.remove("package-dir")
- data__packagedir = data["package-dir"]
- if not isinstance(data__packagedir, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be object", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='type')
- data__packagedir_is_dict = isinstance(data__packagedir, dict)
- if data__packagedir_is_dict:
- data__packagedir_keys = set(data__packagedir.keys())
- for data__packagedir_key, data__packagedir_val in data__packagedir.items():
- if REGEX_PATTERNS['^.*$'].search(data__packagedir_key):
- if data__packagedir_key in data__packagedir_keys:
- data__packagedir_keys.remove(data__packagedir_key)
- if not isinstance(data__packagedir_val, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir.{data__packagedir_key}".format(**locals()) + " must be string", value=data__packagedir_val, name="" + (name_prefix or "data") + ".package-dir.{data__packagedir_key}".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if data__packagedir_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must not contain "+str(data__packagedir_keys)+" properties", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='additionalProperties')
- data__packagedir_len = len(data__packagedir)
- if data__packagedir_len != 0:
- data__packagedir_property_names = True
- for data__packagedir_key in data__packagedir:
- try:
- data__packagedir_key_one_of_count2 = 0
- if data__packagedir_key_one_of_count2 < 2:
- try:
- if data__packagedir_key != "":
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be same as const definition: ", value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'const': ''}, rule='const')
- data__packagedir_key_one_of_count2 += 1
- except JsonSchemaValueException: pass
- if data__packagedir_key_one_of_count2 < 2:
- try:
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_package_name(data__packagedir_key, custom_formats, (name_prefix or "data") + ".package-dir")
- data__packagedir_key_one_of_count2 += 1
- except JsonSchemaValueException: pass
- if data__packagedir_key_one_of_count2 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be valid exactly by one definition" + (" (" + str(data__packagedir_key_one_of_count2) + " matches found)"), value=data__packagedir_key, name="" + (name_prefix or "data") + ".package-dir", definition={'oneOf': [{'const': ''}, {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}]}, rule='oneOf')
- except JsonSchemaValueException:
- data__packagedir_property_names = False
- if not data__packagedir_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-dir must be named by propertyName definition", value=data__packagedir, name="" + (name_prefix or "data") + ".package-dir", definition={'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, rule='propertyNames')
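-        # ``package-data`` (and ``exclude-package-data`` below) map package names or the
-        # wildcard "*" to lists of glob patterns, reusing the propertyNames pattern above.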
- if "package-data" in data_keys:
- data_keys.remove("package-data")
- data__packagedata = data["package-data"]
- if not isinstance(data__packagedata, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be object", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type')
- data__packagedata_is_dict = isinstance(data__packagedata, dict)
- if data__packagedata_is_dict:
- data__packagedata_keys = set(data__packagedata.keys())
- for data__packagedata_key, data__packagedata_val in data__packagedata.items():
- if REGEX_PATTERNS['^.*$'].search(data__packagedata_key):
- if data__packagedata_key in data__packagedata_keys:
- data__packagedata_keys.remove(data__packagedata_key)
- if not isinstance(data__packagedata_val, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data.{data__packagedata_key}".format(**locals()) + " must be array", value=data__packagedata_val, name="" + (name_prefix or "data") + ".package-data.{data__packagedata_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__packagedata_val_is_list = isinstance(data__packagedata_val, (list, tuple))
- if data__packagedata_val_is_list:
- data__packagedata_val_len = len(data__packagedata_val)
- for data__packagedata_val_x, data__packagedata_val_item in enumerate(data__packagedata_val):
- if not isinstance(data__packagedata_val_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data.{data__packagedata_key}[{data__packagedata_val_x}]".format(**locals()) + " must be string", value=data__packagedata_val_item, name="" + (name_prefix or "data") + ".package-data.{data__packagedata_key}[{data__packagedata_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if data__packagedata_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must not contain "+str(data__packagedata_keys)+" properties", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='additionalProperties')
- data__packagedata_len = len(data__packagedata)
- if data__packagedata_len != 0:
- data__packagedata_property_names = True
- for data__packagedata_key in data__packagedata:
- try:
- data__packagedata_key_one_of_count3 = 0
- if data__packagedata_key_one_of_count3 < 2:
- try:
- if isinstance(data__packagedata_key, str):
- if not custom_formats["python-module-name"](data__packagedata_key):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be python-module-name", value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'format': 'python-module-name'}, rule='format')
- data__packagedata_key_one_of_count3 += 1
- except JsonSchemaValueException: pass
- if data__packagedata_key_one_of_count3 < 2:
- try:
- if data__packagedata_key != "*":
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be same as const definition: *", value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'const': '*'}, rule='const')
- data__packagedata_key_one_of_count3 += 1
- except JsonSchemaValueException: pass
- if data__packagedata_key_one_of_count3 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be valid exactly by one definition" + (" (" + str(data__packagedata_key_one_of_count3) + " matches found)"), value=data__packagedata_key, name="" + (name_prefix or "data") + ".package-data", definition={'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, rule='oneOf')
- except JsonSchemaValueException:
- data__packagedata_property_names = False
- if not data__packagedata_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".package-data must be named by propertyName definition", value=data__packagedata, name="" + (name_prefix or "data") + ".package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='propertyNames')
- if "include-package-data" in data_keys:
- data_keys.remove("include-package-data")
- data__includepackagedata = data["include-package-data"]
- if not isinstance(data__includepackagedata, (bool)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".include-package-data must be boolean", value=data__includepackagedata, name="" + (name_prefix or "data") + ".include-package-data", definition={'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, rule='type')
- if "exclude-package-data" in data_keys:
- data_keys.remove("exclude-package-data")
- data__excludepackagedata = data["exclude-package-data"]
- if not isinstance(data__excludepackagedata, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be object", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type')
- data__excludepackagedata_is_dict = isinstance(data__excludepackagedata, dict)
- if data__excludepackagedata_is_dict:
- data__excludepackagedata_keys = set(data__excludepackagedata.keys())
- for data__excludepackagedata_key, data__excludepackagedata_val in data__excludepackagedata.items():
- if REGEX_PATTERNS['^.*$'].search(data__excludepackagedata_key):
- if data__excludepackagedata_key in data__excludepackagedata_keys:
- data__excludepackagedata_keys.remove(data__excludepackagedata_key)
- if not isinstance(data__excludepackagedata_val, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}".format(**locals()) + " must be array", value=data__excludepackagedata_val, name="" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__excludepackagedata_val_is_list = isinstance(data__excludepackagedata_val, (list, tuple))
- if data__excludepackagedata_val_is_list:
- data__excludepackagedata_val_len = len(data__excludepackagedata_val)
- for data__excludepackagedata_val_x, data__excludepackagedata_val_item in enumerate(data__excludepackagedata_val):
- if not isinstance(data__excludepackagedata_val_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}[{data__excludepackagedata_val_x}]".format(**locals()) + " must be string", value=data__excludepackagedata_val_item, name="" + (name_prefix or "data") + ".exclude-package-data.{data__excludepackagedata_key}[{data__excludepackagedata_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if data__excludepackagedata_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must not contain "+str(data__excludepackagedata_keys)+" properties", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='additionalProperties')
- data__excludepackagedata_len = len(data__excludepackagedata)
- if data__excludepackagedata_len != 0:
- data__excludepackagedata_property_names = True
- for data__excludepackagedata_key in data__excludepackagedata:
- try:
- data__excludepackagedata_key_one_of_count4 = 0
- if data__excludepackagedata_key_one_of_count4 < 2:
- try:
- if isinstance(data__excludepackagedata_key, str):
- if not custom_formats["python-module-name"](data__excludepackagedata_key):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be python-module-name", value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'format': 'python-module-name'}, rule='format')
- data__excludepackagedata_key_one_of_count4 += 1
- except JsonSchemaValueException: pass
- if data__excludepackagedata_key_one_of_count4 < 2:
- try:
- if data__excludepackagedata_key != "*":
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be same as const definition: *", value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'const': '*'}, rule='const')
- data__excludepackagedata_key_one_of_count4 += 1
- except JsonSchemaValueException: pass
- if data__excludepackagedata_key_one_of_count4 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be valid exactly by one definition" + (" (" + str(data__excludepackagedata_key_one_of_count4) + " matches found)"), value=data__excludepackagedata_key, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, rule='oneOf')
- except JsonSchemaValueException:
- data__excludepackagedata_property_names = False
- if not data__excludepackagedata_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".exclude-package-data must be named by propertyName definition", value=data__excludepackagedata, name="" + (name_prefix or "data") + ".exclude-package-data", definition={'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='propertyNames')
- if "namespace-packages" in data_keys:
- data_keys.remove("namespace-packages")
- data__namespacepackages = data["namespace-packages"]
- if not isinstance(data__namespacepackages, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages must be array", value=data__namespacepackages, name="" + (name_prefix or "data") + ".namespace-packages", definition={'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, rule='type')
- data__namespacepackages_is_list = isinstance(data__namespacepackages, (list, tuple))
- if data__namespacepackages_is_list:
- data__namespacepackages_len = len(data__namespacepackages)
- for data__namespacepackages_x, data__namespacepackages_item in enumerate(data__namespacepackages):
- if not isinstance(data__namespacepackages_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + " must be string", value=data__namespacepackages_item, name="" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type')
- if isinstance(data__namespacepackages_item, str):
- if not custom_formats["python-module-name"](data__namespacepackages_item):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + " must be python-module-name", value=data__namespacepackages_item, name="" + (name_prefix or "data") + ".namespace-packages[{data__namespacepackages_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format')
- if "py-modules" in data_keys:
- data_keys.remove("py-modules")
- data__pymodules = data["py-modules"]
- if not isinstance(data__pymodules, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules must be array", value=data__pymodules, name="" + (name_prefix or "data") + ".py-modules", definition={'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, rule='type')
- data__pymodules_is_list = isinstance(data__pymodules, (list, tuple))
- if data__pymodules_is_list:
- data__pymodules_len = len(data__pymodules)
- for data__pymodules_x, data__pymodules_item in enumerate(data__pymodules):
- if not isinstance(data__pymodules_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + " must be string", value=data__pymodules_item, name="" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='type')
- if isinstance(data__pymodules_item, str):
- if not custom_formats["python-module-name"](data__pymodules_item):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + " must be python-module-name", value=data__pymodules_item, name="" + (name_prefix or "data") + ".py-modules[{data__pymodules_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'python-module-name'}, rule='format')
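-        # ``data-files`` is marked **DEPRECATED** in the schema (it does not work with
-        # wheels) but is still validated as a mapping of directory -> list of glob patterns.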
- if "data-files" in data_keys:
- data_keys.remove("data-files")
- data__datafiles = data["data-files"]
- if not isinstance(data__datafiles, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files must be object", value=data__datafiles, name="" + (name_prefix or "data") + ".data-files", definition={'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, rule='type')
- data__datafiles_is_dict = isinstance(data__datafiles, dict)
- if data__datafiles_is_dict:
- data__datafiles_keys = set(data__datafiles.keys())
- for data__datafiles_key, data__datafiles_val in data__datafiles.items():
- if REGEX_PATTERNS['^.*$'].search(data__datafiles_key):
- if data__datafiles_key in data__datafiles_keys:
- data__datafiles_keys.remove(data__datafiles_key)
- if not isinstance(data__datafiles_val, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files.{data__datafiles_key}".format(**locals()) + " must be array", value=data__datafiles_val, name="" + (name_prefix or "data") + ".data-files.{data__datafiles_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__datafiles_val_is_list = isinstance(data__datafiles_val, (list, tuple))
- if data__datafiles_val_is_list:
- data__datafiles_val_len = len(data__datafiles_val)
- for data__datafiles_val_x, data__datafiles_val_item in enumerate(data__datafiles_val):
- if not isinstance(data__datafiles_val_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".data-files.{data__datafiles_key}[{data__datafiles_val_x}]".format(**locals()) + " must be string", value=data__datafiles_val_item, name="" + (name_prefix or "data") + ".data-files.{data__datafiles_key}[{data__datafiles_val_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
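-        # ``cmdclass`` values must be dotted "pkg.module.Class" paths, enforced via the
-        # ``python-qualified-identifier`` custom format.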
- if "cmdclass" in data_keys:
- data_keys.remove("cmdclass")
- data__cmdclass = data["cmdclass"]
- if not isinstance(data__cmdclass, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass must be object", value=data__cmdclass, name="" + (name_prefix or "data") + ".cmdclass", definition={'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, rule='type')
- data__cmdclass_is_dict = isinstance(data__cmdclass, dict)
- if data__cmdclass_is_dict:
- data__cmdclass_keys = set(data__cmdclass.keys())
- for data__cmdclass_key, data__cmdclass_val in data__cmdclass.items():
- if REGEX_PATTERNS['^.*$'].search(data__cmdclass_key):
- if data__cmdclass_key in data__cmdclass_keys:
- data__cmdclass_keys.remove(data__cmdclass_key)
- if not isinstance(data__cmdclass_val, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + " must be string", value=data__cmdclass_val, name="" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'python-qualified-identifier'}, rule='type')
- if isinstance(data__cmdclass_val, str):
- if not custom_formats["python-qualified-identifier"](data__cmdclass_val):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + " must be python-qualified-identifier", value=data__cmdclass_val, name="" + (name_prefix or "data") + ".cmdclass.{data__cmdclass_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'python-qualified-identifier'}, rule='format')
- if "license-files" in data_keys:
- data_keys.remove("license-files")
- data__licensefiles = data["license-files"]
- if not isinstance(data__licensefiles, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license-files must be array", value=data__licensefiles, name="" + (name_prefix or "data") + ".license-files", definition={'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).', "By default: ``['LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*']``"], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, rule='type')
- data__licensefiles_is_list = isinstance(data__licensefiles, (list, tuple))
- if data__licensefiles_is_list:
- data__licensefiles_len = len(data__licensefiles)
- for data__licensefiles_x, data__licensefiles_item in enumerate(data__licensefiles):
- if not isinstance(data__licensefiles_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license-files[{data__licensefiles_x}]".format(**locals()) + " must be string", value=data__licensefiles_item, name="" + (name_prefix or "data") + ".license-files[{data__licensefiles_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
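-        # ``dynamic`` holds ``attr:``/``file:`` directives for PEP 621 fields resolved at
-        # build time; most sub-keys delegate to the shared file-directive helper validators.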
- if "dynamic" in data_keys:
- data_keys.remove("dynamic")
- data__dynamic = data["dynamic"]
- if not isinstance(data__dynamic, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be object", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}, rule='type')
- data__dynamic_is_dict = isinstance(data__dynamic, dict)
- if data__dynamic_is_dict:
- data__dynamic_keys = set(data__dynamic.keys())
- if "version" in data__dynamic_keys:
- data__dynamic_keys.remove("version")
- data__dynamic__version = data__dynamic["version"]
- data__dynamic__version_one_of_count5 = 0
- if data__dynamic__version_one_of_count5 < 2:
- try:
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive(data__dynamic__version, custom_formats, (name_prefix or "data") + ".dynamic.version")
- data__dynamic__version_one_of_count5 += 1
- except JsonSchemaValueException: pass
- if data__dynamic__version_one_of_count5 < 2:
- try:
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__version, custom_formats, (name_prefix or "data") + ".dynamic.version")
- data__dynamic__version_one_of_count5 += 1
- except JsonSchemaValueException: pass
- if data__dynamic__version_one_of_count5 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.version must be valid exactly by one definition" + (" (" + str(data__dynamic__version_one_of_count5) + " matches found)"), value=data__dynamic__version, name="" + (name_prefix or "data") + ".dynamic.version", definition={'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, rule='oneOf')
- if "classifiers" in data__dynamic_keys:
- data__dynamic_keys.remove("classifiers")
- data__dynamic__classifiers = data__dynamic["classifiers"]
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__classifiers, custom_formats, (name_prefix or "data") + ".dynamic.classifiers")
- if "description" in data__dynamic_keys:
- data__dynamic_keys.remove("description")
- data__dynamic__description = data__dynamic["description"]
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__description, custom_formats, (name_prefix or "data") + ".dynamic.description")
- if "dependencies" in data__dynamic_keys:
- data__dynamic_keys.remove("dependencies")
- data__dynamic__dependencies = data__dynamic["dependencies"]
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__dependencies, custom_formats, (name_prefix or "data") + ".dynamic.dependencies")
- if "entry-points" in data__dynamic_keys:
- data__dynamic_keys.remove("entry-points")
- data__dynamic__entrypoints = data__dynamic["entry-points"]
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__entrypoints, custom_formats, (name_prefix or "data") + ".dynamic.entry-points")
- if "optional-dependencies" in data__dynamic_keys:
- data__dynamic_keys.remove("optional-dependencies")
- data__dynamic__optionaldependencies = data__dynamic["optional-dependencies"]
- if not isinstance(data__dynamic__optionaldependencies, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must be object", value=data__dynamic__optionaldependencies, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, rule='type')
- data__dynamic__optionaldependencies_is_dict = isinstance(data__dynamic__optionaldependencies, dict)
- if data__dynamic__optionaldependencies_is_dict:
- data__dynamic__optionaldependencies_keys = set(data__dynamic__optionaldependencies.keys())
- for data__dynamic__optionaldependencies_key, data__dynamic__optionaldependencies_val in data__dynamic__optionaldependencies.items():
- if REGEX_PATTERNS['.+'].search(data__dynamic__optionaldependencies_key):
- if data__dynamic__optionaldependencies_key in data__dynamic__optionaldependencies_keys:
- data__dynamic__optionaldependencies_keys.remove(data__dynamic__optionaldependencies_key)
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__optionaldependencies_val, custom_formats, (name_prefix or "data") + ".dynamic.optional-dependencies.{data__dynamic__optionaldependencies_key}".format(**locals()))
- if data__dynamic__optionaldependencies_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must not contain "+str(data__dynamic__optionaldependencies_keys)+" properties", value=data__dynamic__optionaldependencies, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, rule='additionalProperties')
- data__dynamic__optionaldependencies_len = len(data__dynamic__optionaldependencies)
- if data__dynamic__optionaldependencies_len != 0:
- data__dynamic__optionaldependencies_property_names = True
- for data__dynamic__optionaldependencies_key in data__dynamic__optionaldependencies:
- try:
- if isinstance(data__dynamic__optionaldependencies_key, str):
- if not custom_formats["python-identifier"](data__dynamic__optionaldependencies_key):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must be python-identifier", value=data__dynamic__optionaldependencies_key, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'format': 'python-identifier'}, rule='format')
- except JsonSchemaValueException:
- data__dynamic__optionaldependencies_property_names = False
- if not data__dynamic__optionaldependencies_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.optional-dependencies must be named by propertyName definition", value=data__dynamic__optionaldependencies, name="" + (name_prefix or "data") + ".dynamic.optional-dependencies", definition={'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, rule='propertyNames')
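-                # ``readme`` is an anyOf (a file directive, or an object carrying only
-                # ``content-type``), but the schema-level ``required: ['file']`` is then
-                # enforced separately below regardless of which branch matched.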
- if "readme" in data__dynamic_keys:
- data__dynamic_keys.remove("readme")
- data__dynamic__readme = data__dynamic["readme"]
- data__dynamic__readme_any_of_count6 = 0
- if not data__dynamic__readme_any_of_count6:
- try:
- validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data__dynamic__readme, custom_formats, (name_prefix or "data") + ".dynamic.readme")
- data__dynamic__readme_any_of_count6 += 1
- except JsonSchemaValueException: pass
- if not data__dynamic__readme_any_of_count6:
- try:
- data__dynamic__readme_is_dict = isinstance(data__dynamic__readme, dict)
- if data__dynamic__readme_is_dict:
- data__dynamic__readme_keys = set(data__dynamic__readme.keys())
- if "content-type" in data__dynamic__readme_keys:
- data__dynamic__readme_keys.remove("content-type")
- data__dynamic__readme__contenttype = data__dynamic__readme["content-type"]
- if not isinstance(data__dynamic__readme__contenttype, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.readme.content-type must be string", value=data__dynamic__readme__contenttype, name="" + (name_prefix or "data") + ".dynamic.readme.content-type", definition={'type': 'string'}, rule='type')
- data__dynamic__readme_any_of_count6 += 1
- except JsonSchemaValueException: pass
- if not data__dynamic__readme_any_of_count6:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.readme cannot be validated by any definition", value=data__dynamic__readme, name="" + (name_prefix or "data") + ".dynamic.readme", definition={'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}, rule='anyOf')
- data__dynamic__readme_is_dict = isinstance(data__dynamic__readme, dict)
- if data__dynamic__readme_is_dict:
- data__dynamic__readme_len = len(data__dynamic__readme)
- if not all(prop in data__dynamic__readme for prop in ['file']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic.readme must contain ['file'] properties", value=data__dynamic__readme, name="" + (name_prefix or "data") + ".dynamic.readme", definition={'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}, rule='required')
- if data__dynamic_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must not contain "+str(data__dynamic_keys)+" properties", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}, rule='additionalProperties')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://setuptools.pypa.io/en/latest/references/keywords.html', 'title': '``tool.setuptools`` table', '$$description': ['Please notice for the time being the ``setuptools`` project does not specify', 'a way of configuring builds via ``pyproject.toml``.', 'Therefore this schema should be taken just as a *"thought experiment"* on how', 'this *might be done*, by following the principles established in', '`ini2toml `_.', 'It considers only ``setuptools`` `parameters', '`_', 'that can currently be configured via ``setup.cfg`` and are not covered by :pep:`621`', 'but intentionally excludes ``dependency_links`` and ``setup_requires``.', 'NOTE: ``scripts`` was renamed to ``script-files`` to avoid confusion with', 'entry-point based scripts (defined in :pep:`621`).'], 'type': 'object', 'additionalProperties': False, 'properties': {'platforms': {'type': 'array', 'items': {'type': 'string'}}, 'provides': {'$$description': ['Package and virtual package names contained within this package', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'obsoletes': {'$$description': ['Packages which this package renders obsolete', '**(not supported by pip)**'], 'type': 'array', 'items': {'type': 'string', 'format': 'pep508-identifier'}}, 'zip-safe': {'description': 'Whether the project can be safely installed and run from a zip file.', 'type': 'boolean'}, 'script-files': {'description': 'Legacy way of defining scripts (entry-points are preferred).', 'type': 'array', 'items': {'type': 'string'}, '$comment': 'TODO: is this field deprecated/should be removed?'}, 'eager-resources': {'$$description': ['Resources that should be extracted together, if any of them is needed,', 'or if any C extensions included in the project are imported.'], 'type': 'array', 'items': {'type': 'string'}}, 'packages': {'$$description': ['Packages that should be included in the distribution.', 'It can be given either as a list of package identifiers', 'or as a ``dict``-like structure with a single key ``find``', 'which corresponds to a dynamic call to', '``setuptools.config.expand.find_packages`` function.', 'The ``find`` key is associated with a nested ``dict``-like structure that can', 'contain ``where``, ``include``, ``exclude`` and ``namespaces`` keys,', 'mimicking the keyword arguments of the associated function.'], 'oneOf': [{'title': 'Array of Python package identifiers', 'type': 'array', 'items': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}}, {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}]}, 'package-dir': {'$$description': [':class:`dict`-like structure mapping from package names to directories where their', 'code can be found.', 'The empty string (as key) means that all packages are contained inside', 'the given directory will be included in the distribution.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'const': ''}, {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}]}, 'patternProperties': {'^.*$': {'type': 'string'}}}, 'package-data': {'$$description': ['Mapping from package names to lists of glob patterns.', 'Usually this option is not needed when using ``include-package-data = true``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'include-package-data': {'$$description': ['Automatically include any data files inside the package directories', 'that are specified by ``MANIFEST.in``', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'boolean'}, 'exclude-package-data': {'$$description': ['Mapping from package names to lists of glob patterns that should be excluded', 'For more information on how to include data files, check ``setuptools`` `docs', '`_.'], 'type': 'object', 'additionalProperties': False, 'propertyNames': {'oneOf': [{'format': 'python-module-name'}, {'const': '*'}]}, 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'namespace-packages': {'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'https://setuptools.pypa.io/en/latest/userguide/package_discovery.html'}, 'py-modules': {'description': 'Modules that setuptools will manipulate', 'type': 'array', 'items': {'type': 'string', 'format': 'python-module-name'}, '$comment': 'TODO: clarify the relationship with ``packages``'}, 'data-files': {'$$description': ['**DEPRECATED**: dict-like structure where each key represents a directory and', 'the value is a list of glob patterns that should be installed in them.', "Please notice this don't work with wheels. See `data files support", '`_'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'array', 'items': {'type': 'string'}}}}, 'cmdclass': {'$$description': ['Mapping of distutils-style command names to ``setuptools.Command`` subclasses', 'which in turn should be represented by strings with a qualified class name', '(i.e., "dotted" form with module), e.g.::\n\n', ' cmdclass = {mycmd = "pkg.subpkg.module.CommandClass"}\n\n', 'The command class should be a directly defined at the top-level of the', 'containing module (no class nesting).'], 'type': 'object', 'patternProperties': {'^.*$': {'type': 'string', 'format': 'python-qualified-identifier'}}}, 'license-files': {'type': 'array', 'items': {'type': 'string'}, '$$description': ['PROVISIONAL: List of glob patterns for all license files being distributed.', '(might become standard with PEP 639).', "By default: ``['LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*']``"], '$comment': 'TODO: revise if PEP 639 is accepted. Probably ``project.license-files``?'}, 'dynamic': {'type': 'object', 'description': 'Instructions for loading :pep:`621`-related metadata dynamically', 'additionalProperties': False, 'properties': {'version': {'$$description': ['A version dynamically loaded via either the ``attr:`` or ``file:``', 'directives. Please make sure the given file or attribute respects :pep:`440`.'], 'oneOf': [{'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}]}, 'classifiers': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'description': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'dependencies': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'entry-points': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'optional-dependencies': {'type': 'object', 'propertyNames': {'format': 'python-identifier'}, 'additionalProperties': False, 'patternProperties': {'.+': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}}}, 'readme': {'anyOf': [{'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, {'properties': {'content-type': {'type': 'string'}}}], 'required': ['file']}}}}, 'definitions': {'package-name': {'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, 'file-directive': {'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, 'attr-directive': {'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, 'find-directive': {'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}}}, rule='additionalProperties')
- return data
-
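The function deleted above is the tail of the generated validator for the ``tool.setuptools`` table. As a minimal sketch of what its ``dynamic.optional-dependencies`` and ``dynamic.readme`` branches accept: the module name ``fastjsonschema_validations`` is hypothetical (these helpers are normally reached only through the module's top-level ``validate`` entry point), and ``str.isidentifier`` is merely a simplified stand-in for the ``python-identifier`` format callable that the real caller injects.

    # Hypothetical import path for the generated module quoted above.
    from fastjsonschema_validations import (
        validate_https___setuptools_pypa_io_en_latest_references_keywords_html as validate_tool_setuptools,
    )

    # Extra names are checked against the 'python-identifier' format, and each
    # value must be a 'file:' directive (simplified format stand-in passed here).
    validate_tool_setuptools(
        {"dynamic": {"optional-dependencies": {"docs": {"file": ["docs/requirements.txt"]}}}},
        custom_formats={"python-identifier": str.isidentifier},
    )

    # 'dynamic.readme' may carry a 'content-type', but the trailing 'required'
    # check after the anyOf keeps 'file' mandatory.
    validate_tool_setuptools(
        {"dynamic": {"readme": {"file": "README.md", "content-type": "text/markdown"}}}
    )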
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_len = len(data)
- if not all(prop in data for prop in ['file']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['file'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, rule='required')
- data_keys = set(data.keys())
- if "file" in data_keys:
- data_keys.remove("file")
- data__file = data["file"]
- data__file_one_of_count7 = 0
- if data__file_one_of_count7 < 2:
- try:
- if not isinstance(data__file, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".file must be string", value=data__file, name="" + (name_prefix or "data") + ".file", definition={'type': 'string'}, rule='type')
- data__file_one_of_count7 += 1
- except JsonSchemaValueException: pass
- if data__file_one_of_count7 < 2:
- try:
- if not isinstance(data__file, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".file must be array", value=data__file, name="" + (name_prefix or "data") + ".file", definition={'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__file_is_list = isinstance(data__file, (list, tuple))
- if data__file_is_list:
- data__file_len = len(data__file)
- for data__file_x, data__file_item in enumerate(data__file):
- if not isinstance(data__file_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".file[{data__file_x}]".format(**locals()) + " must be string", value=data__file_item, name="" + (name_prefix or "data") + ".file[{data__file_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- data__file_one_of_count7 += 1
- except JsonSchemaValueException: pass
- if data__file_one_of_count7 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".file must be valid exactly by one definition" + (" (" + str(data__file_one_of_count7) + " matches found)"), value=data__file, name="" + (name_prefix or "data") + ".file", definition={'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}, rule='oneOf')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/file-directive', 'title': "'file:' directive", 'description': 'Value is read from a file (or list of files and then concatenated)', 'type': 'object', 'additionalProperties': False, 'properties': {'file': {'oneOf': [{'type': 'string'}, {'type': 'array', 'items': {'type': 'string'}}]}}, 'required': ['file']}, rule='additionalProperties')
- return data
-
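A quick sketch of what the ``file-directive`` validator above enforces: an object with exactly one key, ``file``, holding either a single path or a list of paths (the ``oneOf`` must match exactly once, and any extra key is rejected). Import paths are hypothetical, as before.

    from fastjsonschema_validations import (  # hypothetical module name
        validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_file_directive as validate_file_directive,
    )
    from fastjsonschema_validations import JsonSchemaValueException  # hypothetical

    validate_file_directive({"file": "README.rst"})                # single path
    validate_file_directive({"file": ["part1.rst", "part2.rst"]})  # concatenated

    try:
        validate_file_directive({"file": "a.rst", "text": "inline"})  # extra key
    except JsonSchemaValueException as exc:
        print(exc)  # ... must not contain {'text'} properties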
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_len = len(data)
- if not all(prop in data for prop in ['attr']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['attr'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, rule='required')
- data_keys = set(data.keys())
- if "attr" in data_keys:
- data_keys.remove("attr")
- data__attr = data["attr"]
- if not isinstance(data__attr, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".attr must be string", value=data__attr, name="" + (name_prefix or "data") + ".attr", definition={'type': 'string'}, rule='type')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'title': "'attr:' directive", '$id': '#/definitions/attr-directive', '$$description': ['Value is read from a module attribute. Supports callables and iterables;', 'unsupported types are cast via ``str()``'], 'type': 'object', 'additionalProperties': False, 'properties': {'attr': {'type': 'string'}}, 'required': ['attr']}, rule='additionalProperties')
- return data
-
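The ``attr-directive`` validator is the mirror image: a single mandatory ``attr`` key holding a dotted attribute name as a string. Same hypothetical imports as in the sketch above.

    from fastjsonschema_validations import (  # hypothetical module name
        validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_attr_directive as validate_attr_directive,
    )
    from fastjsonschema_validations import JsonSchemaValueException  # hypothetical

    validate_attr_directive({"attr": "mypkg.__version__"})

    try:
        validate_attr_directive({"file": "VERSION.txt"})  # wrong directive kind
    except JsonSchemaValueException as exc:
        print(exc)  # ... must contain ['attr'] properties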
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_keys = set(data.keys())
- if "find" in data_keys:
- data_keys.remove("find")
- data__find = data["find"]
- if not isinstance(data__find, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find must be object", value=data__find, name="" + (name_prefix or "data") + ".find", definition={'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}, rule='type')
- data__find_is_dict = isinstance(data__find, dict)
- if data__find_is_dict:
- data__find_keys = set(data__find.keys())
- if "where" in data__find_keys:
- data__find_keys.remove("where")
- data__find__where = data__find["where"]
- if not isinstance(data__find__where, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.where must be array", value=data__find__where, name="" + (name_prefix or "data") + ".find.where", definition={'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, rule='type')
- data__find__where_is_list = isinstance(data__find__where, (list, tuple))
- if data__find__where_is_list:
- data__find__where_len = len(data__find__where)
- for data__find__where_x, data__find__where_item in enumerate(data__find__where):
- if not isinstance(data__find__where_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.where[{data__find__where_x}]".format(**locals()) + " must be string", value=data__find__where_item, name="" + (name_prefix or "data") + ".find.where[{data__find__where_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if "exclude" in data__find_keys:
- data__find_keys.remove("exclude")
- data__find__exclude = data__find["exclude"]
- if not isinstance(data__find__exclude, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.exclude must be array", value=data__find__exclude, name="" + (name_prefix or "data") + ".find.exclude", definition={'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, rule='type')
- data__find__exclude_is_list = isinstance(data__find__exclude, (list, tuple))
- if data__find__exclude_is_list:
- data__find__exclude_len = len(data__find__exclude)
- for data__find__exclude_x, data__find__exclude_item in enumerate(data__find__exclude):
- if not isinstance(data__find__exclude_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.exclude[{data__find__exclude_x}]".format(**locals()) + " must be string", value=data__find__exclude_item, name="" + (name_prefix or "data") + ".find.exclude[{data__find__exclude_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if "include" in data__find_keys:
- data__find_keys.remove("include")
- data__find__include = data__find["include"]
- if not isinstance(data__find__include, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.include must be array", value=data__find__include, name="" + (name_prefix or "data") + ".find.include", definition={'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, rule='type')
- data__find__include_is_list = isinstance(data__find__include, (list, tuple))
- if data__find__include_is_list:
- data__find__include_len = len(data__find__include)
- for data__find__include_x, data__find__include_item in enumerate(data__find__include):
- if not isinstance(data__find__include_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.include[{data__find__include_x}]".format(**locals()) + " must be string", value=data__find__include_item, name="" + (name_prefix or "data") + ".find.include[{data__find__include_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if "namespaces" in data__find_keys:
- data__find_keys.remove("namespaces")
- data__find__namespaces = data__find["namespaces"]
- if not isinstance(data__find__namespaces, (bool)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find.namespaces must be boolean", value=data__find__namespaces, name="" + (name_prefix or "data") + ".find.namespaces", definition={'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}, rule='type')
- if data__find_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".find must not contain "+str(data__find_keys)+" properties", value=data__find, name="" + (name_prefix or "data") + ".find", definition={'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}, rule='additionalProperties')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/find-directive', 'title': "'find:' directive", 'type': 'object', 'additionalProperties': False, 'properties': {'find': {'type': 'object', '$$description': ['Dynamic `package discovery', '`_.'], 'additionalProperties': False, 'properties': {'where': {'description': 'Directories to be searched for packages (Unix-style relative path)', 'type': 'array', 'items': {'type': 'string'}}, 'exclude': {'type': 'array', '$$description': ['Exclude packages that match the values listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'include': {'type': 'array', '$$description': ['Restrict the found packages to just the ones listed in this field.', "Can container shell-style wildcards (e.g. ``'pkg.*'``)"], 'items': {'type': 'string'}}, 'namespaces': {'type': 'boolean', '$$description': ['When ``True``, directories without a ``__init__.py`` file will also', 'be scanned for :pep:`420`-style implicit namespaces']}}}}}, rule='additionalProperties')
- return data
-
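For the ``find-directive`` validator, every sub-key of ``find`` is optional, but ``where``/``exclude``/``include`` must be arrays of strings and ``namespaces`` a boolean. A minimal sketch, again with hypothetical imports:

    from fastjsonschema_validations import (  # hypothetical module name
        validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_find_directive as validate_find_directive,
    )
    from fastjsonschema_validations import JsonSchemaValueException  # hypothetical

    validate_find_directive(
        {"find": {"where": ["src"], "exclude": ["tests*"], "namespaces": True}}
    )

    try:
        validate_find_directive({"find": {"where": "src"}})  # scalar, not array
    except JsonSchemaValueException as exc:
        print(exc)  # data.find.where must be array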
-def validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_package_name(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be string", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, rule='type')
- data_any_of_count8 = 0
- if not data_any_of_count8:
- try:
- if isinstance(data, str):
- if not custom_formats["python-module-name"](data):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be python-module-name", value=data, name="" + (name_prefix or "data") + "", definition={'format': 'python-module-name'}, rule='format')
- data_any_of_count8 += 1
- except JsonSchemaValueException: pass
- if not data_any_of_count8:
- try:
- if isinstance(data, str):
- if not custom_formats["pep561-stub-name"](data):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be pep561-stub-name", value=data, name="" + (name_prefix or "data") + "", definition={'format': 'pep561-stub-name'}, rule='format')
- data_any_of_count8 += 1
- except JsonSchemaValueException: pass
- if not data_any_of_count8:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " cannot be validated by any definition", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/package-name', 'title': 'Valid package name', 'description': 'Valid package name (importable or PEP 561).', 'type': 'string', 'anyOf': [{'format': 'python-module-name'}, {'format': 'pep561-stub-name'}]}, rule='anyOf')
- return data
-
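Note that the ``package-name`` validator defers entirely to the injected format callables: with the default empty ``custom_formats`` the anyOf branches would fail with a ``KeyError`` rather than a validation error. The callables below are deliberately simplified stand-ins for illustration, not the real format implementations.

    from fastjsonschema_validations import (  # hypothetical module name
        validate_https___setuptools_pypa_io_en_latest_references_keywords_html__definitions_package_name as validate_package_name,
    )

    # Simplified stand-ins for the injected format callables.
    formats = {
        "python-module-name": lambda v: all(p.isidentifier() for p in v.split(".")),
        "pep561-stub-name": lambda v: v.endswith("-stubs"),
    }
    validate_package_name("my_pkg.submodule", custom_formats=formats)  # importable name
    validate_package_name("my_pkg-stubs", custom_formats=formats)      # PEP 561 stub name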
-def validate_https___docs_python_org_3_install(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://docs.python.org/3/install/', 'title': '``tool.distutils`` table', '$$description': ['Originally, ``distutils`` allowed developers to configure arguments for', '``setup.py`` scripts via `distutils configuration files', '`_.', '``tool.distutils`` subtables could be used with the same purpose', '(NOT CURRENTLY IMPLEMENTED).'], 'type': 'object', 'properties': {'global': {'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}}, 'patternProperties': {'.+': {'type': 'object'}}, '$comment': 'TODO: Is there a practical way of making this schema more specific?'}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_keys = set(data.keys())
- if "global" in data_keys:
- data_keys.remove("global")
- data__global = data["global"]
- if not isinstance(data__global, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".global must be object", value=data__global, name="" + (name_prefix or "data") + ".global", definition={'type': 'object', 'description': 'Global options applied to all ``distutils`` commands'}, rule='type')
- for data_key, data_val in data.items():
- if REGEX_PATTERNS['.+'].search(data_key):
- if data_key in data_keys:
- data_keys.remove(data_key)
- if not isinstance(data_val, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be object", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'object'}, rule='type')
- return data
-
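The ``tool.distutils`` validator above is intentionally loose: ``global`` plus any other command table is accepted, as long as each value is itself a table. A minimal sketch under the same hypothetical imports:

    from fastjsonschema_validations import (  # hypothetical module name
        validate_https___docs_python_org_3_install as validate_distutils,
    )
    from fastjsonschema_validations import JsonSchemaValueException  # hypothetical

    validate_distutils({"global": {"verbose": 1}, "sdist": {"formats": "gztar"}})

    try:
        validate_distutils({"sdist": "not-a-table"})
    except JsonSchemaValueException as exc:
        print(exc)  # data.sdist must be object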
-def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_len = len(data)
- if not all(prop in data for prop in ['name']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['name'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='required')
- data_keys = set(data.keys())
- if "name" in data_keys:
- data_keys.remove("name")
- data__name = data["name"]
- if not isinstance(data__name, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be string", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, rule='type')
- if isinstance(data__name, str):
- if not custom_formats["pep508-identifier"](data__name):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be pep508-identifier", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, rule='format')
- if "version" in data_keys:
- data_keys.remove("version")
- data__version = data["version"]
- if not isinstance(data__version, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".version must be string", value=data__version, name="" + (name_prefix or "data") + ".version", definition={'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, rule='type')
- if isinstance(data__version, str):
- if not custom_formats["pep440"](data__version):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".version must be pep440", value=data__version, name="" + (name_prefix or "data") + ".version", definition={'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, rule='format')
- if "description" in data_keys:
- data_keys.remove("description")
- data__description = data["description"]
- if not isinstance(data__description, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".description must be string", value=data__description, name="" + (name_prefix or "data") + ".description", definition={'type': 'string', '$$description': ['The `summary description of the project', '`_']}, rule='type')
- if "readme" in data_keys:
- data_keys.remove("readme")
- data__readme = data["readme"]
- data__readme_one_of_count9 = 0
- if data__readme_one_of_count9 < 2:
- try:
- if not isinstance(data__readme, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be string", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, rule='type')
- data__readme_one_of_count9 += 1
- except JsonSchemaValueException: pass
- if data__readme_one_of_count9 < 2:
- try:
- if not isinstance(data__readme, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be object", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}, rule='type')
- data__readme_any_of_count10 = 0
- if not data__readme_any_of_count10:
- try:
- data__readme_is_dict = isinstance(data__readme, dict)
- if data__readme_is_dict:
- data__readme_len = len(data__readme)
- if not all(prop in data__readme for prop in ['file']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must contain ['file'] properties", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, rule='required')
- data__readme_keys = set(data__readme.keys())
- if "file" in data__readme_keys:
- data__readme_keys.remove("file")
- data__readme__file = data__readme["file"]
- if not isinstance(data__readme__file, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.file must be string", value=data__readme__file, name="" + (name_prefix or "data") + ".readme.file", definition={'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}, rule='type')
- data__readme_any_of_count10 += 1
- except JsonSchemaValueException: pass
- if not data__readme_any_of_count10:
- try:
- data__readme_is_dict = isinstance(data__readme, dict)
- if data__readme_is_dict:
- data__readme_len = len(data__readme)
- if not all(prop in data__readme for prop in ['text']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must contain ['text'] properties", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}, rule='required')
- data__readme_keys = set(data__readme.keys())
- if "text" in data__readme_keys:
- data__readme_keys.remove("text")
- data__readme__text = data__readme["text"]
- if not isinstance(data__readme__text, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.text must be string", value=data__readme__text, name="" + (name_prefix or "data") + ".readme.text", definition={'type': 'string', 'description': 'Full text describing the project.'}, rule='type')
- data__readme_any_of_count10 += 1
- except JsonSchemaValueException: pass
- if not data__readme_any_of_count10:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme cannot be validated by any definition", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, rule='anyOf')
- data__readme_is_dict = isinstance(data__readme, dict)
- if data__readme_is_dict:
- data__readme_len = len(data__readme)
- if not all(prop in data__readme for prop in ['content-type']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must contain ['content-type'] properties", value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}, rule='required')
- data__readme_keys = set(data__readme.keys())
- if "content-type" in data__readme_keys:
- data__readme_keys.remove("content-type")
- data__readme__contenttype = data__readme["content-type"]
- if not isinstance(data__readme__contenttype, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme.content-type must be string", value=data__readme__contenttype, name="" + (name_prefix or "data") + ".readme.content-type", definition={'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}, rule='type')
- data__readme_one_of_count9 += 1
- except JsonSchemaValueException: pass
- if data__readme_one_of_count9 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".readme must be valid exactly by one definition" + (" (" + str(data__readme_one_of_count9) + " matches found)"), value=data__readme, name="" + (name_prefix or "data") + ".readme", definition={'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, rule='oneOf')
- if "requires-python" in data_keys:
- data_keys.remove("requires-python")
- data__requirespython = data["requires-python"]
- if not isinstance(data__requirespython, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".requires-python must be string", value=data__requirespython, name="" + (name_prefix or "data") + ".requires-python", definition={'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, rule='type')
- if isinstance(data__requirespython, str):
- if not custom_formats["pep508-versionspec"](data__requirespython):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".requires-python must be pep508-versionspec", value=data__requirespython, name="" + (name_prefix or "data") + ".requires-python", definition={'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, rule='format')
- if "license" in data_keys:
- data_keys.remove("license")
- data__license = data["license"]
- data__license_one_of_count11 = 0
- if data__license_one_of_count11 < 2:
- try:
- data__license_is_dict = isinstance(data__license, dict)
- if data__license_is_dict:
- data__license_len = len(data__license)
- if not all(prop in data__license for prop in ['file']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must contain ['file'] properties", value=data__license, name="" + (name_prefix or "data") + ".license", definition={'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, rule='required')
- data__license_keys = set(data__license.keys())
- if "file" in data__license_keys:
- data__license_keys.remove("file")
- data__license__file = data__license["file"]
- if not isinstance(data__license__file, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license.file must be string", value=data__license__file, name="" + (name_prefix or "data") + ".license.file", definition={'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}, rule='type')
- data__license_one_of_count11 += 1
- except JsonSchemaValueException: pass
- if data__license_one_of_count11 < 2:
- try:
- data__license_is_dict = isinstance(data__license, dict)
- if data__license_is_dict:
- data__license_len = len(data__license)
- if not all(prop in data__license for prop in ['text']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must contain ['text'] properties", value=data__license, name="" + (name_prefix or "data") + ".license", definition={'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}, rule='required')
- data__license_keys = set(data__license.keys())
- if "text" in data__license_keys:
- data__license_keys.remove("text")
- data__license__text = data__license["text"]
- if not isinstance(data__license__text, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license.text must be string", value=data__license__text, name="" + (name_prefix or "data") + ".license.text", definition={'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}, rule='type')
- data__license_one_of_count11 += 1
- except JsonSchemaValueException: pass
- if data__license_one_of_count11 != 1:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".license must be valid exactly by one definition" + (" (" + str(data__license_one_of_count11) + " matches found)"), value=data__license, name="" + (name_prefix or "data") + ".license", definition={'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, rule='oneOf')
- if "authors" in data_keys:
- data_keys.remove("authors")
- data__authors = data["authors"]
- if not isinstance(data__authors, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".authors must be array", value=data__authors, name="" + (name_prefix or "data") + ".authors", definition={'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, rule='type')
- data__authors_is_list = isinstance(data__authors, (list, tuple))
- if data__authors_is_list:
- data__authors_len = len(data__authors)
- for data__authors_x, data__authors_item in enumerate(data__authors):
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data__authors_item, custom_formats, (name_prefix or "data") + ".authors[{data__authors_x}]".format(**locals()))
- if "maintainers" in data_keys:
- data_keys.remove("maintainers")
- data__maintainers = data["maintainers"]
- if not isinstance(data__maintainers, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".maintainers must be array", value=data__maintainers, name="" + (name_prefix or "data") + ".maintainers", definition={'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, rule='type')
- data__maintainers_is_list = isinstance(data__maintainers, (list, tuple))
- if data__maintainers_is_list:
- data__maintainers_len = len(data__maintainers)
- for data__maintainers_x, data__maintainers_item in enumerate(data__maintainers):
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data__maintainers_item, custom_formats, (name_prefix or "data") + ".maintainers[{data__maintainers_x}]".format(**locals()))
- if "keywords" in data_keys:
- data_keys.remove("keywords")
- data__keywords = data["keywords"]
- if not isinstance(data__keywords, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".keywords must be array", value=data__keywords, name="" + (name_prefix or "data") + ".keywords", definition={'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, rule='type')
- data__keywords_is_list = isinstance(data__keywords, (list, tuple))
- if data__keywords_is_list:
- data__keywords_len = len(data__keywords)
- for data__keywords_x, data__keywords_item in enumerate(data__keywords):
- if not isinstance(data__keywords_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".keywords[{data__keywords_x}]".format(**locals()) + " must be string", value=data__keywords_item, name="" + (name_prefix or "data") + ".keywords[{data__keywords_x}]".format(**locals()) + "", definition={'type': 'string'}, rule='type')
- if "classifiers" in data_keys:
- data_keys.remove("classifiers")
- data__classifiers = data["classifiers"]
- if not isinstance(data__classifiers, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers must be array", value=data__classifiers, name="" + (name_prefix or "data") + ".classifiers", definition={'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, rule='type')
- data__classifiers_is_list = isinstance(data__classifiers, (list, tuple))
- if data__classifiers_is_list:
- data__classifiers_len = len(data__classifiers)
- for data__classifiers_x, data__classifiers_item in enumerate(data__classifiers):
- if not isinstance(data__classifiers_item, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + " must be string", value=data__classifiers_item, name="" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, rule='type')
- if isinstance(data__classifiers_item, str):
- if not custom_formats["trove-classifier"](data__classifiers_item):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + " must be trove-classifier", value=data__classifiers_item, name="" + (name_prefix or "data") + ".classifiers[{data__classifiers_x}]".format(**locals()) + "", definition={'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, rule='format')
- if "urls" in data_keys:
- data_keys.remove("urls")
- data__urls = data["urls"]
- if not isinstance(data__urls, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls must be object", value=data__urls, name="" + (name_prefix or "data") + ".urls", definition={'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, rule='type')
- data__urls_is_dict = isinstance(data__urls, dict)
- if data__urls_is_dict:
- data__urls_keys = set(data__urls.keys())
- for data__urls_key, data__urls_val in data__urls.items():
- if REGEX_PATTERNS['^.+$'].search(data__urls_key):
- if data__urls_key in data__urls_keys:
- data__urls_keys.remove(data__urls_key)
- if not isinstance(data__urls_val, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + " must be string", value=data__urls_val, name="" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'url'}, rule='type')
- if isinstance(data__urls_val, str):
- if not custom_formats["url"](data__urls_val):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + " must be url", value=data__urls_val, name="" + (name_prefix or "data") + ".urls.{data__urls_key}".format(**locals()) + "", definition={'type': 'string', 'format': 'url'}, rule='format')
- if data__urls_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".urls must not contain "+str(data__urls_keys)+" properties", value=data__urls, name="" + (name_prefix or "data") + ".urls", definition={'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, rule='additionalProperties')
- if "scripts" in data_keys:
- data_keys.remove("scripts")
- data__scripts = data["scripts"]
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__scripts, custom_formats, (name_prefix or "data") + ".scripts")
- if "gui-scripts" in data_keys:
- data_keys.remove("gui-scripts")
- data__guiscripts = data["gui-scripts"]
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__guiscripts, custom_formats, (name_prefix or "data") + ".gui-scripts")
- if "entry-points" in data_keys:
- data_keys.remove("entry-points")
- data__entrypoints = data["entry-points"]
- data__entrypoints_is_dict = isinstance(data__entrypoints, dict)
- if data__entrypoints_is_dict:
- data__entrypoints_keys = set(data__entrypoints.keys())
- for data__entrypoints_key, data__entrypoints_val in data__entrypoints.items():
- if REGEX_PATTERNS['^.+$'].search(data__entrypoints_key):
- if data__entrypoints_key in data__entrypoints_keys:
- data__entrypoints_keys.remove(data__entrypoints_key)
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data__entrypoints_val, custom_formats, (name_prefix or "data") + ".entry-points.{data__entrypoints_key}".format(**locals()))
- if data__entrypoints_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must not contain "+str(data__entrypoints_keys)+" properties", value=data__entrypoints, name="" + (name_prefix or "data") + ".entry-points", definition={'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, rule='additionalProperties')
- data__entrypoints_len = len(data__entrypoints)
- if data__entrypoints_len != 0:
- data__entrypoints_property_names = True
- for data__entrypoints_key in data__entrypoints:
- try:
- if isinstance(data__entrypoints_key, str):
- if not custom_formats["python-entrypoint-group"](data__entrypoints_key):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must be python-entrypoint-group", value=data__entrypoints_key, name="" + (name_prefix or "data") + ".entry-points", definition={'format': 'python-entrypoint-group'}, rule='format')
- except JsonSchemaValueException:
- data__entrypoints_property_names = False
- if not data__entrypoints_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".entry-points must be named by propertyName definition", value=data__entrypoints, name="" + (name_prefix or "data") + ".entry-points", definition={'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, rule='propertyNames')
- if "dependencies" in data_keys:
- data_keys.remove("dependencies")
- data__dependencies = data["dependencies"]
- if not isinstance(data__dependencies, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dependencies must be array", value=data__dependencies, name="" + (name_prefix or "data") + ".dependencies", definition={'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, rule='type')
- data__dependencies_is_list = isinstance(data__dependencies, (list, tuple))
- if data__dependencies_is_list:
- data__dependencies_len = len(data__dependencies)
- for data__dependencies_x, data__dependencies_item in enumerate(data__dependencies):
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data__dependencies_item, custom_formats, (name_prefix or "data") + ".dependencies[{data__dependencies_x}]".format(**locals()))
- if "optional-dependencies" in data_keys:
- data_keys.remove("optional-dependencies")
- data__optionaldependencies = data["optional-dependencies"]
- if not isinstance(data__optionaldependencies, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be object", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='type')
- data__optionaldependencies_is_dict = isinstance(data__optionaldependencies, dict)
- if data__optionaldependencies_is_dict:
- data__optionaldependencies_keys = set(data__optionaldependencies.keys())
- for data__optionaldependencies_key, data__optionaldependencies_val in data__optionaldependencies.items():
- if REGEX_PATTERNS['^.+$'].search(data__optionaldependencies_key):
- if data__optionaldependencies_key in data__optionaldependencies_keys:
- data__optionaldependencies_keys.remove(data__optionaldependencies_key)
- if not isinstance(data__optionaldependencies_val, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}".format(**locals()) + " must be array", value=data__optionaldependencies_val, name="" + (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}".format(**locals()) + "", definition={'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, rule='type')
- data__optionaldependencies_val_is_list = isinstance(data__optionaldependencies_val, (list, tuple))
- if data__optionaldependencies_val_is_list:
- data__optionaldependencies_val_len = len(data__optionaldependencies_val)
- for data__optionaldependencies_val_x, data__optionaldependencies_val_item in enumerate(data__optionaldependencies_val):
- validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data__optionaldependencies_val_item, custom_formats, (name_prefix or "data") + ".optional-dependencies.{data__optionaldependencies_key}[{data__optionaldependencies_val_x}]".format(**locals()))
- if data__optionaldependencies_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must not contain "+str(data__optionaldependencies_keys)+" properties", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='additionalProperties')
- data__optionaldependencies_len = len(data__optionaldependencies)
- if data__optionaldependencies_len != 0:
- data__optionaldependencies_property_names = True
- for data__optionaldependencies_key in data__optionaldependencies:
- try:
- if isinstance(data__optionaldependencies_key, str):
- if not custom_formats["pep508-identifier"](data__optionaldependencies_key):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be pep508-identifier", value=data__optionaldependencies_key, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'format': 'pep508-identifier'}, rule='format')
- except JsonSchemaValueException:
- data__optionaldependencies_property_names = False
- if not data__optionaldependencies_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".optional-dependencies must be named by propertyName definition", value=data__optionaldependencies, name="" + (name_prefix or "data") + ".optional-dependencies", definition={'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, rule='propertyNames')
- if "dynamic" in data_keys:
- data_keys.remove("dynamic")
- data__dynamic = data["dynamic"]
- if not isinstance(data__dynamic, (list, tuple)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be array", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}, rule='type')
- data__dynamic_is_list = isinstance(data__dynamic, (list, tuple))
- if data__dynamic_is_list:
- data__dynamic_len = len(data__dynamic)
- for data__dynamic_x, data__dynamic_item in enumerate(data__dynamic):
- if data__dynamic_item not in ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic[{data__dynamic_x}]".format(**locals()) + " must be one of ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']", value=data__dynamic_item, name="" + (name_prefix or "data") + ".dynamic[{data__dynamic_x}]".format(**locals()) + "", definition={'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}, rule='enum')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$schema': 'http://json-schema.org/draft-07/schema', '$id': 'https://packaging.python.org/en/latest/specifications/declaring-project-metadata/', 'title': 'Package metadata stored in the ``project`` table', '$$description': ['Data structure for the **project** table inside ``pyproject.toml``', '(as initially defined in :pep:`621`)'], 'type': 'object', 'properties': {'name': {'type': 'string', 'description': 'The name (primary identifier) of the project. MUST be statically defined.', 'format': 'pep508-identifier'}, 'version': {'type': 'string', 'description': 'The version of the project as supported by :pep:`440`.', 'format': 'pep440'}, 'description': {'type': 'string', '$$description': ['The `summary description of the project', '`_']}, 'readme': {'$$description': ['`Full/detailed description of the project in the form of a README', '`_', "with meaning similar to the one defined in `core metadata's Description", '`_'], 'oneOf': [{'type': 'string', '$$description': ['Relative path to a text file (UTF-8) containing the full description', 'of the project. If the file path ends in case-insensitive ``.md`` or', '``.rst`` suffixes, then the content-type is respectively', '``text/markdown`` or ``text/x-rst``']}, {'type': 'object', 'allOf': [{'anyOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to a text file containing the full description', 'of the project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', 'description': 'Full text describing the project.'}}, 'required': ['text']}]}, {'properties': {'content-type': {'type': 'string', '$$description': ['Content-type (:rfc:`1341`) of the full description', '(e.g. ``text/markdown``). The ``charset`` parameter is assumed', 'UTF-8 when not present.'], '$comment': 'TODO: add regex pattern or format?'}}, 'required': ['content-type']}]}]}, 'requires-python': {'type': 'string', 'format': 'pep508-versionspec', '$$description': ['`The Python version requirements of the project', '`_.']}, 'license': {'description': '`Project license `_.', 'oneOf': [{'properties': {'file': {'type': 'string', '$$description': ['Relative path to the file (UTF-8) which contains the license for the', 'project.']}}, 'required': ['file']}, {'properties': {'text': {'type': 'string', '$$description': ['The license of the project whose meaning is that of the', '`License field from the core metadata', '`_.']}}, 'required': ['text']}]}, 'authors': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'authors' of the project.", 'The exact meaning is open to interpretation (e.g. original or primary authors,', 'current maintainers, or owners of the package).']}, 'maintainers': {'type': 'array', 'items': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, '$$description': ["The people or organizations considered to be the 'maintainers' of the project.", 'Similarly to ``authors``, the exact meaning is open to interpretation.']}, 'keywords': {'type': 'array', 'items': {'type': 'string'}, 'description': 'List of keywords to assist searching for the distribution in a larger catalog.'}, 'classifiers': {'type': 'array', 'items': {'type': 'string', 'format': 'trove-classifier', 'description': '`PyPI classifier `_.'}, '$$description': ['`Trove classifiers `_', 'which apply to the project.']}, 'urls': {'type': 'object', 'description': 'URLs associated with the project in the form ``label => value``.', 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', 'format': 'url'}}}, 'scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'gui-scripts': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'entry-points': {'$$description': ['Instruct the installer to expose the given modules/functions via', '``entry-point`` discovery mechanism (useful for plugins).', 'More information available in the `Python packaging guide', '`_.'], 'propertyNames': {'format': 'python-entrypoint-group'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}}}, 'dependencies': {'type': 'array', 'description': 'Project (mandatory) dependencies.', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}, 'optional-dependencies': {'type': 'object', 'description': 'Optional dependency for the project', 'propertyNames': {'format': 'pep508-identifier'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'array', 'items': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}}, 'dynamic': {'type': 'array', '$$description': ['Specifies which fields are intentionally unspecified and expected to be', 'dynamically provided by build tools'], 'items': {'enum': ['version', 'description', 'readme', 'requires-python', 'license', 'authors', 'maintainers', 'keywords', 'classifiers', 'urls', 'scripts', 'gui-scripts', 'entry-points', 'dependencies', 'optional-dependencies']}}}, 'required': ['name'], 'additionalProperties': False, 'if': {'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, 'then': {'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, 'definitions': {'author': {'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, 'entry-point-group': {'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, 'dependency': {'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}}}, rule='additionalProperties')
- try:
- try:
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_len = len(data)
- if not all(prop in data for prop in ['dynamic']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['dynamic'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, rule='required')
- data_keys = set(data.keys())
- if "dynamic" in data_keys:
- data_keys.remove("dynamic")
- data__dynamic = data["dynamic"]
- data__dynamic_is_list = isinstance(data__dynamic, (list, tuple))
- if data__dynamic_is_list:
- data__dynamic_contains = False
- for data__dynamic_key in data__dynamic:
- try:
- if data__dynamic_key != "version":
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must be same as const definition: version", value=data__dynamic_key, name="" + (name_prefix or "data") + ".dynamic", definition={'const': 'version'}, rule='const')
- data__dynamic_contains = True
- break
- except JsonSchemaValueException: pass
- if not data__dynamic_contains:
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".dynamic must contain one of contains definition", value=data__dynamic, name="" + (name_prefix or "data") + ".dynamic", definition={'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}, rule='contains')
- except JsonSchemaValueException: pass
- else:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must NOT match a disallowed definition", value=data, name="" + (name_prefix or "data") + "", definition={'not': {'required': ['dynamic'], 'properties': {'dynamic': {'contains': {'const': 'version'}, '$$description': ['version is listed in ``dynamic``']}}}, '$$comment': ['According to :pep:`621`:', ' If the core metadata specification lists a field as "Required", then', ' the metadata MUST specify the field statically or list it in dynamic', 'In turn, `core metadata`_ defines:', ' The required fields are: Metadata-Version, Name, Version.', ' All the other fields are optional.', 'Since ``Metadata-Version`` is defined by the build back-end, ``name`` and', '``version`` are the only mandatory information in ``pyproject.toml``.', '.. _core metadata: https://packaging.python.org/specifications/core-metadata/']}, rule='not')
- except JsonSchemaValueException:
- pass
- else:
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_len = len(data)
- if not all(prop in data for prop in ['version']):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must contain ['version'] properties", value=data, name="" + (name_prefix or "data") + "", definition={'required': ['version'], '$$description': ['version should be statically defined in the ``version`` field']}, rule='required')
- return data
-
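-# NOTE: the helper validators below appear to be machine-generated (fastjsonschema style);
-# if so, the JSON schema source should be edited and the code regenerated rather than patched by hand.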
-def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_dependency(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be string", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}, rule='type')
- if isinstance(data, str):
- if not custom_formats["pep508"](data):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be pep508", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/dependency', 'title': 'Dependency', 'type': 'string', 'description': 'Project dependency specification according to PEP 508', 'format': 'pep508'}, rule='format')
- return data
-
-def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_entry_point_group(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_keys = set(data.keys())
- for data_key, data_val in data.items():
- if REGEX_PATTERNS['^.+$'].search(data_key):
- if data_key in data_keys:
- data_keys.remove(data_key)
- if not isinstance(data_val, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be string", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}, rule='type')
- if isinstance(data_val, str):
- if not custom_formats["python-entrypoint-reference"](data_val):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".{data_key}".format(**locals()) + " must be python-entrypoint-reference", value=data_val, name="" + (name_prefix or "data") + ".{data_key}".format(**locals()) + "", definition={'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}, rule='format')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='additionalProperties')
- data_len = len(data)
- if data_len != 0:
- data_property_names = True
- for data_key in data:
- try:
- if isinstance(data_key, str):
- if not custom_formats["python-entrypoint-name"](data_key):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be python-entrypoint-name", value=data_key, name="" + (name_prefix or "data") + "", definition={'format': 'python-entrypoint-name'}, rule='format')
- except JsonSchemaValueException:
- data_property_names = False
- if not data_property_names:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be named by propertyName definition", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/entry-point-group', 'title': 'Entry-points', 'type': 'object', '$$description': ['Entry-points are grouped together to indicate what sort of capabilities they', 'provide.', 'See the `packaging guides', '`_', 'and `setuptools docs', '`_', 'for more information.'], 'propertyNames': {'format': 'python-entrypoint-name'}, 'additionalProperties': False, 'patternProperties': {'^.+$': {'type': 'string', '$$description': ['Reference to a Python object. It is either in the form', '``importable.module``, or ``importable.module:object.attr``.'], 'format': 'python-entrypoint-reference', '$comment': 'https://packaging.python.org/specifications/entry-points/'}}}, rule='propertyNames')
- return data
-
-def validate_https___packaging_python_org_en_latest_specifications_declaring_project_metadata___definitions_author(data, custom_formats={}, name_prefix=None):
- if not isinstance(data, (dict)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must be object", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, rule='type')
- data_is_dict = isinstance(data, dict)
- if data_is_dict:
- data_keys = set(data.keys())
- if "name" in data_keys:
- data_keys.remove("name")
- data__name = data["name"]
- if not isinstance(data__name, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".name must be string", value=data__name, name="" + (name_prefix or "data") + ".name", definition={'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, rule='type')
- if "email" in data_keys:
- data_keys.remove("email")
- data__email = data["email"]
- if not isinstance(data__email, (str)):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".email must be string", value=data__email, name="" + (name_prefix or "data") + ".email", definition={'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}, rule='type')
- if isinstance(data__email, str):
- if not REGEX_PATTERNS["idn-email_re_pattern"].match(data__email):
- raise JsonSchemaValueException("" + (name_prefix or "data") + ".email must be idn-email", value=data__email, name="" + (name_prefix or "data") + ".email", definition={'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}, rule='format')
- if data_keys:
- raise JsonSchemaValueException("" + (name_prefix or "data") + " must not contain "+str(data_keys)+" properties", value=data, name="" + (name_prefix or "data") + "", definition={'$id': '#/definitions/author', 'title': 'Author or Maintainer', '$comment': 'https://peps.python.org/pep-0621/#authors-maintainers', 'type': 'object', 'additionalProperties': False, 'properties': {'name': {'type': 'string', '$$description': ['MUST be a valid email name, i.e. whatever can be put as a name, before an', 'email, in :rfc:`822`.']}, 'email': {'type': 'string', 'format': 'idn-email', 'description': 'MUST be a valid email address'}}}, rule='additionalProperties')
- return data
\ No newline at end of file
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/dev/parse_results.sh b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/dev/parse_results.sh
deleted file mode 100644
index 80768a4005753447c49339790fe66c9b82a80aaf..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/dev/parse_results.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/bin/bash
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-# A shell script that parses metrics from the log file.
-# Make it easier for developers to track performance of models.
-
-LOG="$1"
-
-if [[ -z "$LOG" ]]; then
- echo "Usage: $0 /path/to/log/file"
- exit 1
-fi
-
-# [12/15 11:47:32] trainer INFO: Total training time: 12:15:04.446477 (0.4900 s / it)
-# [12/15 11:49:03] inference INFO: Total inference time: 0:01:25.326167 (0.13652186737060548 s / img per device, on 8 devices)
-# [12/15 11:49:03] inference INFO: Total inference pure compute time: .....
-
-# training time
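-# the speed is reported inside parentheses, e.g. "(0.4900 s / it)"; keep only the number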
-trainspeed=$(grep -o 'Overall training.*' "$LOG" | grep -Eo '\(.*\)' | grep -o '[0-9\.]*')
-echo "Training speed: $trainspeed s/it"
-
-# inference time: there could be multiple inference during training
-inferencespeed=$(grep -o 'Total inference pure.*' "$LOG" | tail -n1 | grep -Eo '\(.*\)' | grep -o '[0-9\.]*' | head -n1)
-echo "Inference speed: $inferencespeed s/it"
-
-# [12/15 11:47:18] trainer INFO: eta: 0:00:00 iter: 90000 loss: 0.5407 (0.7256) loss_classifier: 0.1744 (0.2446) loss_box_reg: 0.0838 (0.1160) loss_mask: 0.2159 (0.2722) loss_objectness: 0.0244 (0.0429) loss_rpn_box_reg: 0.0279 (0.0500) time: 0.4487 (0.4899) data: 0.0076 (0.0975) lr: 0.000200 max mem: 4161
-memory=$(grep -o 'max[_ ]mem: [0-9]*' "$LOG" | tail -n1 | grep -o '[0-9]*')
-echo "Training memory: $memory MB"
-
-echo "Easy to copypaste:"
-echo "$trainspeed","$inferencespeed","$memory"
-
-echo "------------------------------"
-
-# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: bbox
-# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl
-# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0017,0.0024,0.0017,0.0005,0.0019,0.0011
-# [12/26 17:26:32] engine.coco_evaluation: copypaste: Task: segm
-# [12/26 17:26:32] engine.coco_evaluation: copypaste: AP,AP50,AP75,APs,APm,APl
-# [12/26 17:26:32] engine.coco_evaluation: copypaste: 0.0014,0.0021,0.0016,0.0005,0.0016,0.0011
-
-echo "COCO Results:"
-num_tasks=$(grep -o 'copypaste:.*Task.*' "$LOG" | sort -u | wc -l)
-# each task has 3 lines
-grep -o 'copypaste:.*' "$LOG" | cut -d ' ' -f 2- | tail -n $((num_tasks * 3))
diff --git a/spaces/Thafx/sdrv30/README.md b/spaces/Thafx/sdrv30/README.md
deleted file mode 100644
index 0e9fd8d15976537fa247766a9b06576e53ad2c71..0000000000000000000000000000000000000000
--- a/spaces/Thafx/sdrv30/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-title: Realistic Vision v3.0
-emoji: 📷
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.18.0
-app_file: app.py
-pinned: true
-duplicated_from: Thafx/sdrv20
-tags:
- - stable-diffusion
- - stable-diffusion-diffusers
- - text-to-image
- - realistic-vision
-models:
- - SG161222/Realistic_Vision_V3.0_VAE
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/TheHouseOfAI/ActionRecognition/app.py b/spaces/TheHouseOfAI/ActionRecognition/app.py
deleted file mode 100644
index f646f4886b7a3ce2dad92ac0823747810b23890e..0000000000000000000000000000000000000000
--- a/spaces/TheHouseOfAI/ActionRecognition/app.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Libraries
-import tensorflow as tf
-import tensorflow_hub as hub
-import cv2
-import numpy as np
-import gradio as gr
-from urllib import request
-
-i3d = hub.load("https://tfhub.dev/deepmind/i3d-kinetics-400/1").signatures['default']
-# Official English-language labels
-KINETICS_URL = "https://raw.githubusercontent.com/deepmind/kinetics-i3d/master/data/label_map.txt"
-with request.urlopen(KINETICS_URL) as obj:
- labels = [line.decode("utf-8").strip() for line in obj.readlines()]
-# Self-defined Dutch-language labels
-#labels_nl = ['abseilen', 'luchtdrummen', 'vragen beantwoorden', 'applaudisseren', 'crème aanbrengen', 'boogschieten', 'armworstelen', 'bloemen schikken', 'computer in elkaar zetten', 'veilen', 'baby wakker worden ', 'koekjes bakken', 'ballon blazen', 'verbanden', 'barbequen', 'bartending', 'beatboxing', 'bijen houden', 'buikdansen', 'bankdrukken', 'achterover buigen', 'buigen metaal', 'fietsen door de sneeuw', 'zandstralen', 'glas blazen', 'bladeren blazen', 'neus blazen', 'kaarsen uitblazen', 'bobsleeën', 'boekbinden', 'springen op trampoline', ' bowlen', 'haar vlechten', 'paneren of paneermeel', 'breakdancing', 'borstel schilderen', 'haar borstelen', 'tanden poetsen', 'kast bouwen', 'schuur bouwen', 'bungeejumpen', 'busking ', 'kanoën of kajakken', 'capoeira', 'baby dragen', 'cartwheeling', 'pompoen snijden', 'vissen vangen', 'honkbal vangen of gooien', 'frisbee vangen of gooien', 'softbal vangen of gooien ', 'vieren', 'olie verversen', 'wiel verwisselen', 'banden controleren', 'chee rleiden', 'houthakken', 'klappen', 'aardewerk maken', 'schoon en eikel', 'vloer schoonmaken', 'goten schoonmaken', 'zwembad schoonmaken', 'schoenen schoonmaken', 'toilet schoonmaken', ' ramen lappen', 'touwklimmen', 'ladder beklimmen', 'boomklimmen', 'contactjongleren', 'kip koken', 'ei koken', 'koken op kampvuur', 'worstjes koken', 'geld tellen' , 'country line dancing', 'cracking neck', 'crawling baby', 'crossing river', 'huilen', 'krullend haar', 'nagels knippen', 'ananas snijden', 'watermeloen snijden', 'dansballet' , 'dansende charleston', 'dansende gangnamstijl', 'dansende macarena', 'deadlifting', 'de kerstboom versieren', 'graven', 'dineren', 'disc golfen', 'klifduiken', 'trefbal', 'aerobics doen', 'de was doen', 'nagels doen', 'tekenen', 'basketbal dribbelen', 'drinken', 'bier drinken', 'shotjes drinken', 'autorijden', 'tractor rijden', 'droppen schoppen', 'vingers trommelen', 'basketbal dunken', 'haar verven', 'hamburger eten', 'taart eten', 'eten g wortelen', 'chips eten', 'donuts eten', 'hotdog eten', 'ijs eten', 'spaghetti eten', 'watermeloen eten', 'eieren zoeken', 'arm trainen', 'trainen met een oefening bal', 'vuur blussen', 'faceplanting', 'vogels voeren', 'vissen voeren', 'geiten voeren', 'wenkbrauwen vullen', 'vingerknippen', 'haar fixeren', 'pannenkoek omdraaien', 'vliegende vlieger ', 'kleding opvouwen', 'servetten opvouwen', 'papier opvouwen', 'front raises', 'groenten frituren', 'vuilnis verzamelen', 'gorgelen', 'haar knippen', 'een tatoeage laten zetten', 'geven of een onderscheiding ontvangen', 'golf chippen', 'golf rijden', 'golf putten', 'vlees malen', 'hond verzorgen', 'paard verzorgen', 'gymnastiek tuimelen', 'hamerwerpen', 'headbangen', ' kopstoten', 'hoogspringen', 'hoge trap', 'honkbal slaan', 'hockeystop', 'slang vasthouden', 'hinkelen', 'hoverboarden', 'knuffelen', 'hoelahoep', 'hurdling', ' slingeren (sport)', 'ijsklimmen', 'ijsvissen', 'schaatsen', 'strijken', 'speerwerpen', 'jetskiën', 'joggen', 'jongleren met ballen', 'jongleren met vuur', 'jongleren met voetbal', 'springen in het zwembad', 'jumpstyle dansen', 'schoppen met velddoelpunt', 'schoppen met voetbal', 'kussen', 'kitesurfen', 'breien' , 'krumping', 'lachen', 'stenen leggen', 'verspringen', 'lunge', 'cake maken', 'sandwich maken', 'bed opmaken', 'sieraden maken', 'pizza maken', 'sneeuwpop maken', 'sushi maken', 'thee zetten', 'marcheren', 'rug masseren', 'voeten masseren', 'benen masseren', 'hoofd masseren', 'koe melken', 'vloer dweilen', 'motorrijden', 'meubels verplaatsen', 'gazon maaien', 
'nieuwsverankering', 'fles openen', 'openingscadeau', 'paragliding', 'parasailing', 'parkour', 'American football passeren (in game)' , 'American football passeren (niet in het spel)', 'appels schillen', 'aardappelen schillen', 'aaidier (geen kat)', 'kat aaien', 'fruit plukken', 'bomen planten', 'pleisteren', 'accordeon spelen', 'badminton spelen', 'doedelzak spelen', 'basketbal spelen', 'basgitaar spelen', 'kaarten spelen', 'c spelen ello', 'schaken', 'klarinet spelen', 'controller spelen', 'cricket spelen', 'bekkens spelen', 'didgeridoo spelen', 'drums spelen', 'fluit spelen', 'gitaar spelen', 'spelen mondharmonica', 'harp spelen', 'ijshockey spelen', 'keyboard spelen', 'kickball spelen', 'monopolie spelen', 'orgel spelen', 'paintball spelen', 'piano spelen', 'poker spelen', ' blokfluit spelen', 'saxofoon spelen', 'squash of racquetball spelen', 'tennis spelen', 'trombone spelen', 'trompet spelen', 'ukelele spelen', 'viool spelen', 'volleyballen', 'xylofoon spelen' , 'polsstokhoogspringen', 'weersvoorspelling presenteren', 'pull ups', 'vuist pompen', 'gas pompen', 'bokszak', 'bokser (boksen)', 'push up', 'auto duwen', 'kar duwen', 'rolstoel duwen', 'boek lezen', 'krant lezen', 'muziek opnemen', 'fietsen', 'kameel rijden', 'olifant rijden', 'mechanische stier rijden', 'berg rijden fiets', 'muilezelrijden', 'rijden of lopen met paard', 'rijdende scooter', 'rijden op eenwieler', 'papier scheuren', 'robotdansen', 'rotsklimmen', 'rotsschaarpapier', 'rolschaatsen', 'rennen op loopband', 'zeilen', 'salsadansen', 'schuurvloer ', 'eieren klauteren', 'duiken', 'tafel dekken', 'handen schudden', 'hoofd schudden', 'messen slijpen', 'potlood slijpen', 'hoofd scheren', 'benen scheren', 'schaap scheren ', 'schoenen glimmen', 'basketbal schieten', 'doel (voetbal) schieten', 'kogelstoten', 'sneeuwschuiven', 'papier versnipperen', 'kaarten schudden', 'side kick', 'tolken in gebarentaal' , 'zingen', 'situp', 'skateboarden', 'schansspringen', 'skiën (geen slalom of langlaufen)', 'langlaufen', 'slalom skiën', 'touwtjespringen', 'parachutespringen', 'slacklinen' , 'slaan', 'sledehondenraces', 'roken', 'roken waterpijp', 'snatch gewichtheffen', 'niezen', 'snuiven', 'snorkelen', 'snowboarden', 'snowkiten', 'sneeuwmobiel', 'salto', 'spinning poi', 'spuiten', 'spuiten', 'springplankduiken', 'squat', 'tong uitsteken', 's stampende druiven', 'arm strekken', 'been strekken', 'gitaar tokkelen', 'surfende menigte', 'surfend water', 'veegvloer', 'rugslag zwemmen', 'schoolslag zwemmen', 'zwemmen vlinderslag' , 'swing dancing', 'swinging legs', 'swinging on something', 'zwaardvechten', 'tai chi', 'een douche nemen', 'tangodansen', 'tapdansen', 'tikken op gitaar', 'tikken pen', 'bier proeven', 'eten proeven', 'getuigen', 'sms-en', 'bijl gooien', 'bal gooien', 'discuswerpen', 'kietelen', 'rodelen', 'munt gooien', ' sla gooien', 'hond trainen', 'trapezing', 'baard trimmen of scheren', 'bomen trimmen', 'hinkstapspringen', 'vlinderdas binden', 'knoop binden (niet op een stropdas)', 'das binden ', 'uitpakken', 'vrachtwagen lossen', 'computer gebruiken', 'afstandsbediening gebruiken (geen gamen)', 'segway gebruiken', 'kluis', 'in de rij wachten', 'de hond uitlaten', 'afwassen ', 'voeten wassen', 'haar wassen', 'handen wassen', 'waterskiën', 'waterglijden', 'planten water geven', 'terug waxen', 'borst harsen', 'waxen wenkbrauwen trekken', 'benen harsen', 'mand weven', 'lassen', 'fluiten', 'windsurfen', 'cadeau inpakken', 'worstelen', 'schrijven', 'geeuwen', 'yoga', 'zumba' ]
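-# Crop the largest centered square out of a frame; the model expects square input.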
-def crop_center_square(frame):
- y, x = frame.shape[0:2]
- min_dim = min(y, x)
- start_x = (x // 2) - (min_dim // 2)
- start_y = (y // 2) - (min_dim // 2)
- return frame[start_y:start_y+min_dim,start_x:start_x+min_dim]
-
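-# Read a clip with OpenCV: center-crop each frame, resize to 224x224, reorder BGR to RGB
-# and scale pixel values to [0, 1]; with the default max_frames=0 the whole clip is loaded.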
-def load_video(path, max_frames=0, resize=(224, 224)):
- cap = cv2.VideoCapture(path)
- frames = []
- try:
- while True:
- ret, frame = cap.read()
- if not ret:
- break
- frame = crop_center_square(frame)
- frame = cv2.resize(frame, resize)
- frame = frame[:, :, [2, 1, 0]]
- frames.append(frame)
- if len(frames) == max_frames:
- break
- finally:
- cap.release()
- return np.array(frames) / 255.0
-
-def predict(sample_video):
- # Add a batch axis to the sample video.
- model_input = tf.constant(sample_video, dtype=tf.float32)[tf.newaxis, ...]
- logits = i3d(model_input)['default'][0]
- probabilities = tf.nn.softmax(logits)
- voorspelling = ""
-    print("Top 3 predictions:")
- for i in np.argsort(probabilities)[::-1][:3]:
- voorspelling = voorspelling + "\n" + f"{labels[i]:22}: {probabilities[i] * 100:5.2f}%"
- return voorspelling
-
-def voorspelling(video_path):
- sample_video = load_video(video_path)
- return predict(sample_video)
-
-video_in = gr.Video(label="Video IN - File in MP4 format, maximum 50 MB")
-classificatie_out = gr.Text(label="Classification - prediction:")
-demo = gr.Interface(fn=voorspelling,
-                    inputs=video_in,
-                    outputs=classificatie_out,
-                    examples=["golf.mp4", "push_up.mp4", "sit_ups.mp4", "gewichtheffen.mp4", "tennis_opslag_01.mp4"],
-                    title="Video classification based on action recognition",
-                    description="Video classification is the task of assigning a description (classification) to the content of a video. A good classification is one that best describes the video as a whole. For example, a video may contain a flower in one frame, while the description that is central to the video is something else (for example 'walking'). Select one of the examples or upload a video yourself ...",
- article = "Inflated 3D Convnet model trained for action recognition on Kinetics-400 : https://github.com/deepmind/kinetics-i3d")
-demo.launch()
\ No newline at end of file
diff --git a/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/util.py b/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/util.py
deleted file mode 100644
index 7eeb7329606da8cad7912e3c9f9493863b3f7f75..0000000000000000000000000000000000000000
--- a/spaces/ThirdEyeData/Customer-Conversion-Prediction/matumizi/util.py
+++ /dev/null
@@ -1,2345 +0,0 @@
-#!/usr/local/bin/python3
-
-# Author: Pranab Ghosh
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you
-# may not use this file except in compliance with the License. You may
-# obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied. See the License for the specific language governing
-# permissions and limitations under the License.
-
-import os
-import sys
-from random import randint
-import random
-import time
-import uuid
-from datetime import datetime
-import math
-import numpy as np
-import pandas as pd
-import matplotlib.pyplot as plt
-import numpy as np
-import logging
-import logging.handlers
-import pickle
-from contextlib import contextmanager
-
-tokens = ["0","1","2","3","4","5","6","7","8","9","A","B","C","D","E","F","G","H","I","J","K","L","M",
- "N","O","P","Q","R","S","T","U","V","W","X","Y","Z","0","1","2","3","4","5","6","7","8","9"]
-numTokens = tokens[:10]
-alphaTokens = tokens[10:36]
-loCaseChars = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k","l","m","n","o",
-"p","q","r","s","t","u","v","w","x","y","z"]
-
-typeInt = "int"
-typeFloat = "float"
-typeString = "string"
-
-secInMinute = 60
-secInHour = 60 * 60
-secInDay = 24 * secInHour
-secInWeek = 7 * secInDay
-secInYear = 365 * secInDay
-secInMonth = secInYear / 12
-
-minInHour = 60
-minInDay = 24 * minInHour
-
-ftPerYard = 3
-ftPerMile = ftPerYard * 1760
-
-
-def genID(size):
- """
- generates ID
-
- Parameters
- size : size of ID
- """
- id = ""
- for i in range(size):
- id = id + selectRandomFromList(tokens)
- return id
-
-def genIdList(numId, idSize):
- """
- generate list of IDs
-
- Parameters:
- numId: number of Ids
- idSize: ID size
- """
- iDs = []
- for i in range(numId):
- iDs.append(genID(idSize))
- return iDs
-
-def genNumID(size):
- """
-	generates ID consisting of digits only
-
- Parameters
- size : size of ID
- """
- id = ""
- for i in range(size):
- id = id + selectRandomFromList(numTokens)
- return id
-
-def genLowCaseID(size):
- """
- generates ID consisting of lower case chars
-
- Parameters
- size : size of ID
- """
- id = ""
- for i in range(size):
- id = id + selectRandomFromList(loCaseChars)
- return id
-
-def genNumIdList(numId, idSize):
- """
- generate list of numeric IDs
-
- Parameters:
- numId: number of Ids
- idSize: ID size
- """
- iDs = []
- for i in range(numId):
- iDs.append(genNumID(idSize))
- return iDs
-
-def genNameInitial():
- """
- generate name initial
- """
- return selectRandomFromList(alphaTokens) + selectRandomFromList(alphaTokens)
-
-def genPhoneNum(arCode):
- """
- generates phone number
-
- Parameters
- arCode: area code
- """
- phNum = genNumID(7)
- return arCode + str(phNum)
-
-def selectRandomFromList(ldata):
- """
-	select an element randomly from a list
-
- Parameters
- ldata : list data
- """
- return ldata[randint(0, len(ldata)-1)]
-
-def selectOtherRandomFromList(ldata, cval):
- """
- select an element randomly from a list excluding the given one
-
- Parameters
- ldata : list data
- cval : value to be excluded
- """
- nval = selectRandomFromList(ldata)
- while nval == cval:
- nval = selectRandomFromList(ldata)
- return nval
-
-def selectRandomSubListFromList(ldata, num):
- """
-	generates random sublist from a list without replacement
-
- Parameters
- ldata : list data
- num : output list size
- """
- assertLesser(num, len(ldata), "size of sublist to be sampled greater than or equal to main list")
- i = randint(0, len(ldata)-1)
- sel = ldata[i]
- selSet = {i}
- selList = [sel]
- while (len(selSet) < num):
- i = randint(0, len(ldata)-1)
- if (i not in selSet):
- sel = ldata[i]
- selSet.add(i)
- selList.append(sel)
- return selList
-
-def selectRandomSubListFromListWithRepl(ldata, num):
- """
-	generates random sublist from a list with replacement
-
- Parameters
- ldata : list data
- num : output list size
-
- """
- return list(map(lambda i : selectRandomFromList(ldata), range(num)))
-
-def selectRandomFromDict(ddata):
- """
- select an element randomly from a dictionary
-
- Parameters
- ddata : dictionary data
- """
- dkeys = list(ddata.keys())
- dk = selectRandomFromList(dkeys)
- el = (dk, ddata[dk])
- return el
-
-def setListRandomFromList(ldata, ldataRepl):
- """
-	sets some elements in the first list randomly with elements from the second list
-
- Parameters
- ldata : list data
- ldataRepl : list with replacement data
- """
- l = len(ldata)
- selSet = set()
- for d in ldataRepl:
- i = randint(0, l-1)
- while i in selSet:
- i = randint(0, l-1)
- ldata[i] = d
- selSet.add(i)
-
-def genIpAddress():
- """
- generates IP address
- """
-	i1 = randint(0,255)
-	i2 = randint(0,255)
-	i3 = randint(0,255)
-	i4 = randint(0,255)
- ip = "%d.%d.%d.%d" %(i1,i2,i3,i4)
- return ip
-
-def curTimeMs():
- """
- current time in ms
- """
- return int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)
-
-def secDegPolyFit(x1, y1, x2, y2, x3, y3):
- """
-	fits a second degree polynomial y = a*x^2 + b*x + c through three points, returning (a, b, c)
-
- Parameters
- x1 : 1st point x
- y1 : 1st point y
- x2 : 2nd point x
- y2 : 2nd point y
- x3 : 3rd point x
- y3 : 3rd point y
- """
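-	# sanity check: the points (0,1), (1,2), (2,5) lie on y = x^2 + 1,
-	# for which this routine returns (a, b, c) = (1.0, 0.0, 1.0)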
- t = (y1 - y2) / (x1 - x2)
- a = t - (y2 - y3) / (x2 - x3)
- a = a / (x1 - x3)
- b = t - a * (x1 + x2)
- c = y1 - a * x1 * x1 - b * x1
- return (a, b, c)
-
-def range_limit(val, minv, maxv):
- """
- range limit a value
-
- Parameters
- val : data value
- minv : minimum
- maxv : maximum
- """
- if (val < minv):
- val = minv
- elif (val > maxv):
- val = maxv
- return val
-
-def rangeLimit(val, minv, maxv):
- """
- range limit a value
-
- Parameters
- val : data value
- minv : minimum
- maxv : maximum
- """
- return range_limit(val, minv, maxv)
-
-def isInRange(val, minv, maxv):
- """
- checks if within range
-
- Parameters
- val : data value
- minv : minimum
- maxv : maximum
- """
- return val >= minv and val <= maxv
-
-def stripFileLines(filePath, offset):
- """
- strips number of chars from both ends
-
- Parameters
- filePath : file path
- offset : offset from both ends of line
- """
- fp = open(filePath, "r")
- for line in fp:
- stripped = line[offset:len(line) - 1 - offset]
- print (stripped)
- fp.close()
-
-def genLatLong(lat1, long1, lat2, long2):
- """
-	generates a random lat long pair within limits
-
- Parameters
- lat1 : lat of 1st point
- long1 : long of 1st point
- lat2 : lat of 2nd point
- long2 : long of 2nd point
- """
- lat = lat1 + (lat2 - lat1) * random.random()
- longg = long1 + (long2 - long1) * random.random()
- return (lat, longg)
-
-def geoDistance(lat1, long1, lat2, long2):
- """
- find geo distance in ft
-
- Parameters
- lat1 : lat of 1st point
- long1 : long of 1st point
- lat2 : lat of 2nd point
- long2 : long of 2nd point
- """
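-	# haversine formula; 6371008.8 is the mean earth radius in meters,
-	# converted to feet by the 3.280840 factor below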
- latDiff = math.radians(lat1 - lat2)
- longDiff = math.radians(long1 - long2)
- l1 = math.sin(latDiff/2.0)
- l2 = math.sin(longDiff/2.0)
- l3 = math.cos(math.radians(lat1))
- l4 = math.cos(math.radians(lat2))
- a = l1 * l1 + l3 * l4 * l2 * l2
- l5 = math.sqrt(a)
- l6 = math.sqrt(1.0 - a)
- c = 2.0 * math.atan2(l5, l6)
- r = 6371008.8 * 3.280840
- return c * r
-
-def minLimit(val, limit):
-	"""
-	lower limits a value
-
-	Parameters
-		val : data value
-		limit : minimum allowed value
-	"""
-	if (val < limit):
-		val = limit
-	return val
-
-def maxLimit(val, limit):
-	"""
-	upper limits a value
-
-	Parameters
-		val : data value
-		limit : maximum allowed value
-	"""
-	if (val > limit):
-		val = limit
-	return val
-
-def rangeSample(val, minLim, maxLim):
- """
-	if outside range, sample randomly within range
-
- Parameters
- val : value
- minLim : minimum
- maxLim : maximum
- """
- if val < minLim or val > maxLim:
- val = randint(minLim, maxLim)
- return val
-
-def genRandomIntListWithinRange(size, minLim, maxLim):
- """
- random unique list of integers within range
-
- Parameters
- size : size of returned list
- minLim : minimum
- maxLim : maximum
- """
-	values = set()
-	# keep sampling until the requested number of unique values has been collected
-	while len(values) < size:
-		values.add(randint(minLim, maxLim))
-	return list(values)
-
-def preturbScalar(value, vrange, distr="uniform"):
- """
-	perturbs a value multiplicatively within a range
-
- Parameters
- value : data value
- vrange : value delta fraction
- distr : noise distribution type
- """
- if distr == "uniform":
- scale = 1.0 - vrange + 2 * vrange * random.random()
- elif distr == "normal":
- scale = 1.0 + np.random.normal(0, vrange)
- else:
-		exitWithMsg("unknown noise distr " + distr)
- return value * scale
-
-def preturbScalarAbs(value, vrange):
- """
-	perturbs a value additively within an absolute range
-
- Parameters
- value : data value
- vrange : value delta absolute
-
- """
- delta = - vrange + 2.0 * vrange * random.random()
- return value + delta
-
-def preturbVector(values, vrange):
- """
-	perturbs a list within a range
-
- Parameters
- values : list data
- vrange : value delta fraction
- """
- nValues = list(map(lambda va: preturbScalar(va, vrange), values))
- return nValues
-
-def randomShiftVector(values, smin, smax):
- """
-	shifts a list by a random quantity within a range
-
-	Parameters
-		values : list data
-		smin : sampling minimum
- smax : sampling maximum
- """
- shift = np.random.uniform(smin, smax)
- return list(map(lambda va: va + shift, values))
-
-def floatRange(beg, end, incr):
- """
- generates float range
-
- Parameters
-		beg : range begin
-		end : range end
- incr : range increment
- """
- return list(np.arange(beg, end, incr))
-
-def shuffle(values, *numShuffles):
- """
- in place shuffling with swap of pairs
-
- Parameters
- values : list data
- numShuffles : parameter list for number of shuffles
- """
- size = len(values)
- if len(numShuffles) == 0:
- numShuffle = int(size / 2)
- elif len(numShuffles) == 1:
- numShuffle = numShuffles[0]
- else:
- numShuffle = randint(numShuffles[0], numShuffles[1])
- print("numShuffle {}".format(numShuffle))
- for i in range(numShuffle):
- first = random.randint(0, size - 1)
- second = random.randint(0, size - 1)
- while first == second:
- second = random.randint(0, size - 1)
- tmp = values[first]
- values[first] = values[second]
- values[second] = tmp
-
-
-def splitList(itms, numGr):
- """
-	splits a list into sub lists of approximately equal size, with items in sublists randomly chosen
-
-	Parameters
-		itms : list of values
- numGr : no of groups
- """
- tcount = len(itms)
- cItems = list(itms)
- sz = int(len(cItems) / numGr)
- groups = list()
- count = 0
- for i in range(numGr):
- if (i == numGr - 1):
- csz = tcount - count
- else:
- csz = sz + randint(-2, 2)
- count += csz
- gr = list()
- for j in range(csz):
- it = selectRandomFromList(cItems)
- gr.append(it)
- cItems.remove(it)
- groups.append(gr)
- return groups
-
-def multVector(values, vrange):
- """
- multiplies a list within value range
-
- Parameters
- values : list of values
-		vrange : fraction of value used as the random scale range
- """
- scale = 1.0 - vrange + 2 * vrange * random.random()
- nValues = list(map(lambda va: va * scale, values))
- return nValues
-
-def weightedAverage(values, weights):
- """
- calculates weighted average
-
- Parameters
- values : list of values
- weights : list of weights
- """
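-	# e.g. weightedAverage([1, 2], [1, 3]) = (1*1 + 2*3) / (1 + 3) = 1.75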
- assert len(values) == len(weights), "values and weights should be same size"
- vw = zip(values, weights)
- wva = list(map(lambda e : e[0] * e[1], vw))
- #wa = sum(x * y for x, y in vw) / sum(weights)
- wav = sum(wva) / sum(weights)
- return wav
-
-def extractFields(line, delim, keepIndices):
- """
-	breaks a line into fields, keeps only the specified fields and returns a new line
-
- Parameters
-		line : delimiter separated string
-		delim : delimiter
-		keepIndices : list of indexes of fields to be retained
- """
- items = line.split(delim)
- newLine = []
- for i in keepIndices:
-		newLine.append(items[i])
- return delim.join(newLine)
-
-def remFields(line, delim, remIndices):
- """
- removes fields from delim separated string
-
- Parameters
-		line : delimiter separated string
-		delim : delimiter
-		remIndices : list of indexes of fields to be removed
- """
- items = line.split(delim)
- newLine = []
- for i in range(len(items)):
- if not arrayContains(remIndices, i):
-			newLine.append(items[i])
- return delim.join(newLine)
-
-def extractList(data, indices):
- """
- extracts list from another list, given indices
-
- Parameters
-		data : list data
- indices : list of indexes to fields to be retained
- """
- if areAllFieldsIncluded(data, indices):
- exList = data.copy()
- #print("all indices")
- else:
- exList = list()
- le = len(data)
- for i in indices:
- assert i < le , "index {} out of bound {}".format(i, le)
- exList.append(data[i])
-
- return exList
-
-def arrayContains(arr, item):
- """
- checks if array contains an item
-
- Parameters
- arr : list data
- item : item to search
- """
- contains = True
- try:
- arr.index(item)
- except ValueError:
- contains = False
- return contains
-
-def strToIntArray(line, delim=","):
- """
- int array from delim separated string
-
- Parameters
-		line : delimiter separated string
- """
- arr = line.split(delim)
- return [int(a) for a in arr]
-
-def strToFloatArray(line, delim=","):
- """
- float array from delim separated string
-
- Parameters
-		line : delimiter separated string
- """
- arr = line.split(delim)
- return [float(a) for a in arr]
-
-def strListOrRangeToIntArray(line):
- """
- int array from delim separated string or range
-
- Parameters
-		line : delimiter separated string
- """
- varr = line.split(",")
- if (len(varr) > 1):
- iarr = list(map(lambda v: int(v), varr))
- else:
- vrange = line.split(":")
- if (len(vrange) == 2):
- lo = int(vrange[0])
- hi = int(vrange[1])
- iarr = list(range(lo, hi+1))
- else:
- iarr = [int(line)]
- return iarr
-
-def toStr(val, precision):
- """
- converts any type to string
-
- Parameters
- val : value
-		precision : precision for float values
- """
- if type(val) == float or type(val) == np.float64 or type(val) == np.float32:
- format = "%" + ".%df" %(precision)
- sVal = format %(val)
- else:
- sVal = str(val)
- return sVal
-
-def toStrFromList(values, precision, delim=","):
- """
- converts list of any type to delim separated string
-
- Parameters
- values : list data
-		precision : precision for float values
-		delim : delimiter
- """
- sValues = list(map(lambda v: toStr(v, precision), values))
- return delim.join(sValues)
-
-def toIntList(values):
- """
- convert to int list
-
- Parameters
- values : list data
- """
- return list(map(lambda va: int(va), values))
-
-def toFloatList(values):
- """
- convert to float list
-
- Parameters
- values : list data
-
- """
- return list(map(lambda va: float(va), values))
-
-def toStrList(values, precision=None):
- """
- convert to string list
-
- Parameters
- values : list data
-		precision : precision for float values
- """
- return list(map(lambda va: toStr(va, precision), values))
-
-def toIntFromBoolean(value):
- """
- convert to int
-
- Parameters
- value : boolean value
- """
- ival = 1 if value else 0
- return ival
-
-def scaleBySum(ldata):
- """
- scales so that sum is 1
-
- Parameters
- ldata : list data
- """
- s = sum(ldata)
- return list(map(lambda e : e/s, ldata))
-
-def scaleByMax(ldata):
- """
- scales so that max value is 1
-
- Parameters
- ldata : list data
- """
- m = max(ldata)
- return list(map(lambda e : e/m, ldata))
-
-def typedValue(val, dtype=None):
- """
- return typed value given string, discovers data type if not specified
-
- Parameters
- val : value
- dtype : data type
- """
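-	# e.g. typedValue("3") -> 3, typedValue("3.5") -> 3.5, typedValue("true") -> True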
- tVal = None
-
- if dtype is not None:
- if dtype == "num":
-			dtype = "int" if val.find(".") == -1 else "float"
-
- if dtype == "int":
- tVal = int(val)
- elif dtype == "float":
- tVal = float(val)
- elif dtype == "bool":
- tVal = bool(val)
- else:
- tVal = val
- else:
- if type(val) == str:
- lVal = val.lower()
-
- #int
- done = True
- try:
- tVal = int(val)
- except ValueError:
- done = False
-
- #float
- if not done:
- done = True
- try:
- tVal = float(val)
- except ValueError:
- done = False
-
- #boolean
- if not done:
- done = True
- if lVal == "true":
- tVal = True
- elif lVal == "false":
- tVal = False
- else:
- done = False
- #None
- if not done:
- if lVal == "none":
- tVal = None
- else:
- tVal = val
- else:
- tVal = val
-
- return tVal
-
-def isInt(val):
- """
- return true if string is int and the typed value
-
- Parameters
- val : value
- """
- valInt = True
- try:
- tVal = int(val)
- except ValueError:
- valInt = False
- tVal = None
- r = (valInt, tVal)
- return r
-
-def isFloat(val):
- """
- return true if string is float
-
- Parameters
- val : value
- """
- valFloat = True
- try:
- tVal = float(val)
- except ValueError:
- valFloat = False
- tVal = None
- r = (valFloat, tVal)
- return r
-
-def getAllFiles(dirPath):
- """
- get all files recursively
-
- Parameters
- dirPath : directory path
- """
- filePaths = []
- for (thisDir, subDirs, fileNames) in os.walk(dirPath):
- for fileName in fileNames:
- filePaths.append(os.path.join(thisDir, fileName))
- filePaths.sort()
- return filePaths
-
-def getFileContent(fpath, verbose=False):
- """
- get file contents in directory
-
- Parameters
-		fpath : directory path
- verbose : verbosity flag
- """
-	# document list
- docComplete = []
- filePaths = getAllFiles(fpath)
-
- # read files
- for filePath in filePaths:
- if verbose:
- print("next file " + filePath)
- with open(filePath, 'r') as contentFile:
- content = contentFile.read()
- docComplete.append(content)
- return (docComplete, filePaths)
-
-def getOneFileContent(fpath):
- """
- get one file contents
-
- Parameters
- fpath : file path
- """
- with open(fpath, 'r') as contentFile:
- docStr = contentFile.read()
- return docStr
-
-def getFileLines(dirPath, delim=","):
- """
- get lines from a file
-
- Parameters
- dirPath : file path
-		delim : delimiter
- """
- lines = list()
- for li in fileRecGen(dirPath, delim):
- lines.append(li)
- return lines
-
-def getFileSampleLines(dirPath, percen, delim=","):
- """
- get sampled lines from a file
-
- Parameters
- dirPath : file path
- percen : sampling percentage
-		delim : delimiter
- """
- lines = list()
- for li in fileRecGen(dirPath, delim):
- if randint(0, 100) < percen:
- lines.append(li)
- return lines
-
-def getFileColumnAsString(dirPath, index, delim=","):
- """
- get string column from a file
-
- Parameters
- dirPath : file path
-		index : column index
-		delim : delimiter
- """
- fields = list()
- for rec in fileRecGen(dirPath, delim):
- fields.append(rec[index])
- #print(fields)
- return fields
-
-def getFileColumnsAsString(dirPath, indexes, delim=","):
- """
- get multiple string columns from a file
-
- Parameters
- dirPath : file path
- indexes : indexes of columns
-		delim : delimiter
-
- """
- nindex = len(indexes)
- columns = list(map(lambda i : list(), range(nindex)))
- for rec in fileRecGen(dirPath, delim):
- for i in range(nindex):
- columns[i].append(rec[indexes[i]])
- return columns
-
-def getFileColumnAsFloat(dirPath, index, delim=","):
- """
-	get float fields from a file
-
-	Parameters
-		dirPath : file path
-		index : column index
-		delim : delimiter
-
- """
- #print("{} {}".format(dirPath, index))
- fields = getFileColumnAsString(dirPath, index, delim)
- return list(map(lambda v:float(v), fields))
-
-def getFileColumnAsInt(dirPath, index, delim=","):
- """
-	get int fields from a file
-
-	Parameters
-		dirPath : file path
-		index : column index
-		delim : delimiter
- """
- fields = getFileColumnAsString(dirPath, index, delim)
- return list(map(lambda v:int(v), fields))
-
-def getFileAsIntMatrix(dirPath, columns, delim=","):
- """
- extracts int matrix from csv file given column indices with each row being concatenation of
- extracted column values row size = num of columns
-
- Parameters
- dirPath : file path
- columns : indexes of columns
-		delim : delimiter
- """
- mat = list()
- for rec in fileSelFieldsRecGen(dirPath, columns, delim):
- mat.append(asIntList(rec))
- return mat
-
-def getFileAsFloatMatrix(dirPath, columns, delim=","):
- """
- extracts float matrix from csv file given column indices with each row being concatenation of
- extracted column values row size = num of columns
-
- Parameters
- dirPath : file path
- columns : indexes of columns
-		delim : delimiter
- """
- mat = list()
- for rec in fileSelFieldsRecGen(dirPath, columns, delim):
- mat.append(asFloatList(rec))
- return mat
-
-def getFileAsFloatColumn(dirPath):
- """
-	get float list from a file with one float per row
-
- Parameters
- dirPath : file path
- """
- flist = list()
- for rec in fileRecGen(dirPath, None):
- flist.append(float(rec))
- return flist
-
-def getFileAsFiltFloatMatrix(dirPath, filt, columns, delim=","):
- """
- extracts float matrix from csv file given row filter and column indices with each row being
- concatenation of extracted column values row size = num of columns
-
- Parameters
- dirPath : file path
- columns : indexes of columns
- filt : row filter lambda
-		delim : delimiter
-
- """
- mat = list()
- for rec in fileFiltSelFieldsRecGen(dirPath, filt, columns, delim):
- mat.append(asFloatList(rec))
- return mat
-
-def getFileAsTypedRecords(dirPath, types, delim=","):
- """
- extracts typed records from csv file with each row being concatenation of
- extracted column values
-
- Parameters
- dirPath : file path
- types : data types
-		delim : delimiter
- """
- (dtypes, cvalues) = extractTypesFromString(types)
- tdata = list()
- for rec in fileRecGen(dirPath, delim):
- trec = list()
- for index, value in enumerate(rec):
- value = __convToTyped(index, value, dtypes)
- trec.append(value)
- tdata.append(trec)
- return tdata
-
-
-def getFileColsAsTypedRecords(dirPath, columns, types, delim=","):
- """
- extracts typed records from csv file given column indices with each row being concatenation of
- extracted column values
-
-	Parameters
-		dirPath : file path
-		columns : column indexes
-		types : data types
-		delim : delimiter
- """
- (dtypes, cvalues) = extractTypesFromString(types)
- tdata = list()
- for rec in fileSelFieldsRecGen(dirPath, columns, delim):
- trec = list()
- for indx, value in enumerate(rec):
- tindx = columns[indx]
- value = __convToTyped(tindx, value, dtypes)
- trec.append(value)
- tdata.append(trec)
- return tdata
-
-def getFileColumnsMinMax(dirPath, columns, dtype, delim=","):
- """
- extracts numeric matrix from csv file given column indices. For each column return min and max
-
- Parameters
- dirPath : file path
- columns : column indexes
- dtype : data type
-		delim : delimiter
- """
- dtypes = list(map(lambda c : str(c) + ":" + dtype, columns))
- dtypes = ",".join(dtypes)
- #print(dtypes)
-
- tdata = getFileColsAsTypedRecords(dirPath, columns, dtypes, delim)
- minMax = list()
- ncola = len(tdata[0])
- ncole = len(columns)
- assertEqual(ncola, ncole, "actual no of columns different from expected")
-
- for ci in range(ncole):
- vmin = sys.float_info.max
- vmax = sys.float_info.min
- for r in tdata:
- cv = r[ci]
- vmin = cv if cv < vmin else vmin
- vmax = cv if cv > vmax else vmax
- mm = (vmin, vmax, vmax - vmin)
- minMax.append(mm)
-
- return minMax
-
-
-def getRecAsTypedRecord(rec, types, delim=None):
- """
- converts record to typed records
-
- Parameters
-		rec : delimiter separated string or list of strings
-		types : field data types
-		delim : delimiter
- """
- if delim is not None:
- rec = rec.split(delim)
- (dtypes, cvalues) = extractTypesFromString(types)
- #print(types)
- #print(dtypes)
- trec = list()
- for ind, value in enumerate(rec):
- tvalue = __convToTyped(ind, value, dtypes)
- trec.append(tvalue)
- return trec
-
-def __convToTyped(index, value, dtypes):
- """
- convert to typed value
-
- Parameters
- index : index in type list
- value : data value
- dtypes : data type list
- """
- #print(index, value)
- dtype = dtypes[index]
- tvalue = value
- if dtype == "int":
- tvalue = int(value)
- elif dtype == "float":
- tvalue = float(value)
- return tvalue
-
-
-
-def extractTypesFromString(types):
- """
- extracts column data types and set values for categorical variables
-
- Parameters
- types : encoded type information
- """
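-	# e.g. "0:int,1:float,2:cat:a b c" yields dtypes {0: "int", 1: "float", 2: "cat"}
-	# and cvalues {2: ["a", "b", "c"]}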
- ftypes = types.split(",")
- dtypes = dict()
- cvalues = dict()
- for ftype in ftypes:
- items = ftype.split(":")
- cindex = int(items[0])
- dtype = items[1]
- dtypes[cindex] = dtype
- if len(items) == 3:
- sitems = items[2].split()
- cvalues[cindex] = sitems
- return (dtypes, cvalues)
-
-def getMultipleFileAsInttMatrix(dirPathWithCol, delim=","):
- """
-	extracts int matrix from csv files given a column index for each file.
-	num of columns = number of rows in each file and num of rows = number of files
-
-	Parameters
-		dirPathWithCol : list of (file path, column index) pairs
-		delim : delimiter
- """
- mat = list()
- minLen = -1
- for path, col in dirPathWithCol:
- colVals = getFileColumnAsInt(path, col, delim)
- if minLen < 0 or len(colVals) < minLen:
- minLen = len(colVals)
- mat.append(colVals)
-
- #make all same length
- mat = list(map(lambda li:li[:minLen], mat))
- return mat
-
-def getMultipleFileAsFloatMatrix(dirPathWithCol, delim=","):
- """
-	extracts float matrix from csv files given a column index for each file.
-	num of columns = number of rows in each file and num of rows = number of files
-
-	Parameters
-		dirPathWithCol : list of (file path, column index) pairs
-		delim : delimiter
- """
- mat = list()
- minLen = -1
- for path, col in dirPathWithCol:
- colVals = getFileColumnAsFloat(path, col, delim)
- if minLen < 0 or len(colVals) < minLen:
- minLen = len(colVals)
- mat.append(colVals)
-
- #make all same length
- mat = list(map(lambda li:li[:minLen], mat))
- return mat
-
-def writeStrListToFile(ldata, filePath, delem=","):
- """
-	writes a list of delim separated strings, or a list of lists of strings, to a file
-
-	Parameters
-		ldata : list data
-		filePath : file path
-		delem : delimiter
- """
- with open(filePath, "w") as fh:
- for r in ldata:
- if type(r) == list:
- r = delem.join(r)
- fh.write(r + "\n")
-
-def writeFloatListToFile(ldata, prec, filePath):
- """
- writes float list to file, one value per line
-
- Parameters
- ldata : list data
- prec : precision
- filePath : file path
- """
- with open(filePath, "w") as fh:
- for d in ldata:
- fh.write(formatFloat(prec, d) + "\n")
-
-def mutateFileLines(dirPath, mutator, marg, delim=","):
- """
- mutates lines from a file
-
- Parameters
-		dirPath : file path
-		mutator : mutation callback
-		marg : argument for the mutation callback
-		delim : delimiter
- """
- lines = list()
- for li in fileRecGen(dirPath, delim):
- li = mutator(li) if marg is None else mutator(li, marg)
- lines.append(li)
- return lines
-
-def takeFirst(elems):
- """
-	returns first item
-
- Parameters
- elems : list of data
- """
- return elems[0]
-
-def takeSecond(elems):
- """
- return 2nd element
-
- Parameters
- elems : list of data
- """
- return elems[1]
-
-def takeThird(elems):
- """
- returns 3rd element
-
- Parameters
- elems : list of data
- """
- return elems[2]
-
-def addToKeyedCounter(dCounter, key, count=1):
- """
-	adds to a keyed counter
-
- Parameters
- dCounter : dictionary of counters
- key : dictionary key
- count : count to add
- """
- curCount = dCounter.get(key, 0)
- dCounter[key] = curCount + count
-
-def incrKeyedCounter(dCounter, key):
- """
- increment keyed counter
-
- Parameters
- dCounter : dictionary of counters
- key : dictionary key
- """
- addToKeyedCounter(dCounter, key, 1)
-
-def appendKeyedList(dList, key, elem):
- """
-	appends an element to a keyed list, creating the list if needed
-
- Parameters
- dList : dictionary of lists
- key : dictionary key
- elem : value to append
- """
- curList = dList.get(key, [])
- curList.append(elem)
- dList[key] = curList
-
-def isNumber(st):
- """
-	Returns True if string is a number
-
- Parameters
- st : string value
- """
- return st.replace('.','',1).isdigit()
-
-def removeNan(values):
- """
- removes nan from list
-
- Parameters
- values : list data
- """
- return list(filter(lambda v: not math.isnan(v), values))
-
-def fileRecGen(filePath, delim = ","):
- """
- file record generator
-
- Parameters
-		filePath : file path
-		delim : delimiter
- """
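-	# e.g. for rec in fileRecGen("data.csv"): yields each row as a list of string fields
-	# (with delim=None each raw line is yielded unsplit); "data.csv" is a hypothetical file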
- with open(filePath, "r") as fp:
- for line in fp:
- line = line[:-1]
- if delim is not None:
- line = line.split(delim)
- yield line
-
-def fileSelFieldsRecGen(dirPath, columns, delim=","):
- """
- file record generator given column indices
-
- Parameters
-		dirPath : file path
-		columns : column indexes as int array or comma separated string
-		delim : delimiter
- """
- if type(columns) == str:
- columns = strToIntArray(columns, delim)
- for rec in fileRecGen(dirPath, delim):
- extracted = extractList(rec, columns)
- yield extracted
-
-def fileSelFieldValueGen(dirPath, column, delim=","):
- """
- file record generator for a given column
-
- Parameters
-		dirPath : file path
-		column : column index
-		delim : delimiter
- """
- for rec in fileRecGen(dirPath, delim):
- yield rec[column]
-
-def fileFiltRecGen(filePath, filt, delim = ","):
- """
- file record generator with row filter applied
-
- Parameters
-		filePath : file path
-		filt : row filter
-		delim : delimiter
- """
- with open(filePath, "r") as fp:
- for line in fp:
- line = line[:-1]
- if delim is not None:
- line = line.split(delim)
- if filt(line):
- yield line
-
-def fileFiltSelFieldsRecGen(filePath, filt, columns, delim = ","):
- """
- file record generator with row and column filter applied
-
- Parameters
-		filePath : file path
-		filt : row filter
-		columns : column indexes as int array or comma separated string
-		delim : delimiter
- """
- columns = strToIntArray(columns, delim)
- with open(filePath, "r") as fp:
- for line in fp:
- line = line[:-1]
- if delim is not None:
- line = line.split(delim)
- if filt(line):
- selected = extractList(line, columns)
- yield selected
-
-def fileTypedRecGen(filePath, ftypes, delim = ","):
- """
- file typed record generator
-
- Parameters
-		filePath : file path
-		ftypes : flat list of alternating column index and data type
-		delim : delimiter
- """
- with open(filePath, "r") as fp:
- for line in fp:
- line = line[:-1]
- line = line.split(delim)
- for i in range(0, len(ftypes), 2):
- ci = ftypes[i]
- dtype = ftypes[i+1]
- assertLesser(ci, len(line), "index out of bound")
- if dtype == "int":
- line[ci] = int(line[ci])
- elif dtype == "float":
- line[ci] = float(line[ci])
- else:
- exitWithMsg("invalid data type")
- yield line
-
-def fileMutatedFieldsRecGen(dirPath, mutator, delim=","):
- """
- file record generator with some columns mutated
-
- Parameters
-		dirPath : file path
-		mutator : row field mutator
-		delim : delimiter
- """
- for rec in fileRecGen(dirPath, delim):
- mutated = mutator(rec)
- yield mutated
-
-def tableSelFieldsFilter(tdata, columns):
- """
- gets tabular data for selected columns
-
- Parameters
- tdata : tabular data
- columns : column indexes
- """
- if areAllFieldsIncluded(tdata[0], columns):
- ntdata = tdata
- else:
- ntdata = list()
- for rec in tdata:
- #print(rec)
- #print(columns)
- nrec = extractList(rec, columns)
- ntdata.append(nrec)
- return ntdata
-
-
-def areAllFieldsIncluded(ldata, columns):
- """
-	returns True if columns is exactly the full index list of the data, in order
-
- Parameters
- ldata : list data
- columns : column indexes
- """
- return list(range(len(ldata))) == columns
-
-def asIntList(items):
- """
- returns int list
-
- Parameters
- items : list data
- """
- return [int(i) for i in items]
-
-def asFloatList(items):
- """
- returns float list
-
- Parameters
- items : list data
- """
- return [float(i) for i in items]
-
-def pastTime(interval, unit):
- """
- current and past time
-
- Parameters
- interval : time interval
- unit: time unit
- """
- curTime = int(time.time())
- if unit == "d":
- pastTime = curTime - interval * secInDay
- elif unit == "h":
- pastTime = curTime - interval * secInHour
- elif unit == "m":
- pastTime = curTime - interval * secInMinute
- else:
- raise ValueError("invalid time unit " + unit)
- return (curTime, pastTime)
-
-def minuteAlign(ts):
- """
- minute aligned time
-
- Parameters
- ts : time stamp in sec
- """
- return int((ts / secInMinute)) * secInMinute
-
-def multMinuteAlign(ts, min):
- """
- multi minute aligned time
-
- Parameters
- ts : time stamp in sec
- min : minute value
- """
- intv = secInMinute * min
- return int((ts / intv)) * intv
-
-def hourAlign(ts):
- """
- hour aligned time
-
- Parameters
- ts : time stamp in sec
- """
- return int((ts / secInHour)) * secInHour
-
-def hourOfDayAlign(ts, hour):
- """
- hour of day aligned time
-
- Parameters
- ts : time stamp in sec
- hour : hour of day
- """
- day = int(ts / secInDay)
- return (24 * day + hour) * secInHour
-
-def dayAlign(ts):
- """
- day aligned time
-
- Parameters
- ts : time stamp in sec
- """
- return int(ts / secInDay) * secInDay
-
-def timeAlign(ts, unit):
- """
- boundary alignment of time
-
- Parameters
- ts : time stamp in sec
- unit : unit of time
- """
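-	# e.g. timeAlign(3723, "h") -> 3600, i.e. aligned down to the hour boundary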
- alignedTs = 0
- if unit == "s":
- alignedTs = ts
- elif unit == "m":
- alignedTs = minuteAlign(ts)
- elif unit == "h":
- alignedTs = hourAlign(ts)
- elif unit == "d":
- alignedTs = dayAlign(ts)
- else:
- raise ValueError("invalid time unit")
- return alignedTs
-
-def monthOfYear(ts):
- """
- month of year
-
- Parameters
- ts : time stamp in sec
- """
- rem = ts % secInYear
-	moy = int(rem / secInMonth)
-	return moy
-
-def dayOfWeek(ts):
- """
- day of week
-
- Parameters
- ts : time stamp in sec
- """
- rem = ts % secInWeek
- dow = int(rem / secInDay)
- return dow
-
-def hourOfDay(ts):
- """
- hour of day
-
- Parameters
- ts : time stamp in sec
- """
- rem = ts % secInDay
- hod = int(rem / secInHour)
- return hod
-
-def processCmdLineArgs(expectedTypes, usage):
- """
- process command line args and returns args as typed values
-
- Parameters
- expectedTypes : expected data types of arguments
- usage : usage message string
- """
- args = []
- numComLineArgs = len(sys.argv)
- numExpected = len(expectedTypes)
- if (numComLineArgs - 1 == len(expectedTypes)):
- try:
- for i in range(0, numExpected):
- if (expectedTypes[i] == typeInt):
- args.append(int(sys.argv[i+1]))
- elif (expectedTypes[i] == typeFloat):
- args.append(float(sys.argv[i+1]))
- elif (expectedTypes[i] == typeString):
- args.append(sys.argv[i+1])
- except ValueError:
- print ("expected number of command line arguments found but there is type mis match")
- sys.exit(1)
- else:
- print ("expected number of command line arguments not found")
- print (usage)
- sys.exit(1)
- return args
-
-def mutateString(val, numMutate, ctype):
- """
- mutate string multiple times
-
- Parameters
- val : string value
- numMutate : num of mutations
- ctype : type of character to mutate with
- """
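-	# e.g. mutateString("ABC123", 2, "num") replaces 2 distinct random positions with random digits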
- mutations = set()
- count = 0
- while count < numMutate:
- j = randint(0, len(val)-1)
- if j not in mutations:
-			if ctype == "alpha":
-				ch = selectRandomFromList(alphaTokens)
-			elif ctype == "num":
-				ch = selectRandomFromList(numTokens)
-			elif ctype == "any":
-				ch = selectRandomFromList(tokens)
-			else:
-				exitWithMsg("invalid character type " + ctype)
- val = val[:j] + ch + val[j+1:]
- mutations.add(j)
- count += 1
- return val
-
-def mutateList(values, numMutate, vmin, vmax, rabs=True):
- """
- mutate list multiple times
-
- Parameters
- values : list value
- numMutate : num of mutations
- vmin : minimum of value range
- vmax : maximum of value range
-		rabs : True if min max range is absolute, otherwise relative
- """
- mutations = set()
- count = 0
- while count < numMutate:
- j = randint(0, len(values)-1)
- if j not in mutations:
- s = np.random.uniform(vmin, vmax)
- values[j] = s if rabs else values[j] * s
- count += 1
- mutations.add(j)
- return values
-
-
-def swap(values, first, second):
- """
- swap two elements
-
- Parameters
- values : list value
- first : first swap position
- second : second swap position
- """
- t = values[first]
- values[first] = values[second]
- values[second] = t
-
-def swapBetweenLists(values1, values2):
- """
- swap two elements between 2 lists
-
- Parameters
- values1 : first list of values
- values2 : second list of values
- """
- p1 = randint(0, len(values1)-1)
- p2 = randint(0, len(values2)-1)
- tmp = values1[p1]
- values1[p1] = values2[p2]
- values2[p2] = tmp
-
-def safeAppend(values, value):
- """
- append only if not None
-
- Parameters
- values : list value
- value : value to append
- """
- if value is not None:
- values.append(value)
-
-def getAllIndex(ldata, fldata):
- """
- get ALL indexes of list elements
-
- Parameters
- ldata : list data to find index in
- fldata : list data for values for index look up
- """
- return list(map(lambda e : fldata.index(e), ldata))
-
-def findIntersection(lOne, lTwo):
- """
- find intersection elements between 2 lists
-
- Parameters
- lOne : first list of data
- lTwo : second list of data
- """
- sOne = set(lOne)
- sTwo = set(lTwo)
- sInt = sOne.intersection(sTwo)
- return list(sInt)
-
-def isIntvOverlapped(rOne, rTwo):
- """
- checks overlap between 2 intervals
-
- Parameters
- rOne : first interval boundaries
- rTwo : second interval boundaries
- """
- clear = rOne[1] <= rTwo[0] or rOne[0] >= rTwo[1]
- return not clear
-
-def isIntvLess(rOne, rTwo):
- """
-	checks if first interval is less than second
-
- Parameters
- rOne : first interval boundaries
- rTwo : second interval boundaries
- """
- less = rOne[1] <= rTwo[0]
- return less
-
-def findRank(e, values):
- """
- find rank of value in a list
-
- Parameters
- e : value to compare with
- values : list data
- """
- count = 1
- for ve in values:
- if ve < e:
- count += 1
- return count
-
-def findRanks(toBeRanked, values):
- """
- find ranks of values in one list in another list
-
- Parameters
- toBeRanked : list of values for which ranks are found
-		values : list in which rank is found
- """
- return list(map(lambda e: findRank(e, values), toBeRanked))
-
-def formatFloat(prec, value, label = None):
- """
- formats a float with optional label
-
- Parameters
- prec : precision
- value : data value
- label : label for data
- """
- st = (label + " ") if label else ""
- formatter = "{:." + str(prec) + "f}"
- return st + formatter.format(value)
-
-def formatAny(value, label = None):
- """
-	formats any object with optional label
-
- Parameters
- value : data value
- label : label for data
- """
- st = (label + " ") if label else ""
- return st + str(value)
-
-def printList(values):
- """
- pretty print list
-
- Parameters
- values : list of values
- """
- for v in values:
- print(v)
-
-def printMap(values, klab, vlab, precision, offset=16):
- """
- pretty print hash map
-
- Parameters
- values : dictionary of values
- klab : label for key
- vlab : label for value
- precision : precision
- offset : left justify offset
- """
- print(klab.ljust(offset, " ") + vlab)
- for k in values.keys():
- v = values[k]
- ks = toStr(k, precision).ljust(offset, " ")
- vs = toStr(v, precision)
- print(ks + vs)
-
-def printPairList(values, lab1, lab2, precision, offset=16):
- """
- pretty print list of pairs
-
- Parameters
-		values : list of value pairs
- lab1 : first label
- lab2 : second label
- precision : precision
- offset : left justify offset
- """
- print(lab1.ljust(offset, " ") + lab2)
- for (v1, v2) in values:
- sv1 = toStr(v1, precision).ljust(offset, " ")
- sv2 = toStr(v2, precision)
- print(sv1 + sv2)
-
-def createMap(*values):
- """
-	creates dictionary from a sequence of key value pairs
-
- Parameters
- values : sequence of key value pairs
- """
- result = dict()
- for i in range(0, len(values), 2):
- result[values[i]] = values[i+1]
- return result
-
-def getColMinMax(table, col):
- """
- return min, max values of a column
-
- Parameters
- table : tabular data
- col : column index
- """
- vmin = None
- vmax = None
- for rec in table:
- value = rec[col]
- if vmin is None:
- vmin = value
- vmax = value
- else:
- if value < vmin:
- vmin = value
- elif value > vmax:
- vmax = value
- return (vmin, vmax, vmax - vmin)
-
-def createLogger(name, logFilePath, logLevName):
- """
- creates logger
-
- Parameters
- name : logger name
- logFilePath : log file path
- logLevName : log level
- """
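-	# typical usage: logger = createLogger("app", "app.log", "info"); logger.info("started")
-	# ("app" and "app.log" are placeholder values)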
- logger = logging.getLogger(name)
- fHandler = logging.handlers.RotatingFileHandler(logFilePath, maxBytes=1048576, backupCount=4)
- logLev = logLevName.lower()
- if logLev == "debug":
- logLevel = logging.DEBUG
- elif logLev == "info":
- logLevel = logging.INFO
- elif logLev == "warning":
- logLevel = logging.WARNING
- elif logLev == "error":
- logLevel = logging.ERROR
- elif logLev == "critical":
- logLevel = logging.CRITICAL
- else:
-		raise ValueError("invalid log level name " + logLevName)
- fHandler.setLevel(logLevel)
- fFormat = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
- fHandler.setFormatter(fFormat)
- logger.addHandler(fHandler)
- logger.setLevel(logLevel)
- return logger
-
-@contextmanager
-def suppressStdout():
- """
- suppress stdout
-
- Parameters
-
- """
- with open(os.devnull, "w") as devnull:
- oldStdout = sys.stdout
- sys.stdout = devnull
- try:
- yield
- finally:
- sys.stdout = oldStdout
-
-def exitWithMsg(msg):
- """
- print message and exit
-
- Parameters
- msg : message
- """
- print(msg + " -- quitting")
- sys.exit(0)
-
-def drawLine(data, yscale=None):
- """
- line plot
-
- Parameters
- data : list data
- yscale : y axis scale
- """
- plt.plot(data)
- if yscale:
- step = int(yscale / 10)
- step = int(step / 10) * 10
- plt.yticks(range(0, yscale, step))
- plt.show()
-
-def drawPlot(x, y, xlabel, ylabel):
- """
- line plot
-
- Parameters
- x : x values
- y : y values
- xlabel : x axis label
- ylabel : y axis label
- """
- if x is None:
- x = list(range(len(y)))
- plt.plot(x,y)
- plt.xlabel(xlabel)
- plt.ylabel(ylabel)
- plt.show()
-
-def drawPairPlot(x, y1, y2, xlabel,ylabel, y1label, y2label):
- """
- line plot of 2 lines
-
- Parameters
- x : x values
- y1 : first y values
- y2 : second y values
- xlabel : x labbel
- ylabel : y label
- y1label : first plot label
- y2label : second plot label
- """
- plt.plot(x, y1, label = y1label)
- plt.plot(x, y2, label = y2label)
- plt.xlabel(xlabel)
- plt.ylabel(ylabel)
- plt.legend()
- plt.show()
-
-def drawHist(ldata, myTitle, myXlabel, myYlabel, nbins=10):
- """
- draw histogram
-
- Parameters
- ldata : list data
- myTitle : title
- myXlabel : x label
- myYlabel : y label
- nbins : num of bins
- """
- plt.hist(ldata, bins=nbins, density=True)
- plt.title(myTitle)
- plt.xlabel(myXlabel)
- plt.ylabel(myYlabel)
- plt.show()
-
-def saveObject(obj, filePath):
- """
- saves an object
-
- Parameters
- obj : object
- filePath : file path for saved object
- """
- with open(filePath, "wb") as outfile:
- pickle.dump(obj,outfile)
-
-def restoreObject(filePath):
- """
- restores an object
-
- Parameters
- filePath : file path to restore object from
- """
- with open(filePath, "rb") as infile:
- obj = pickle.load(infile)
- return obj
-
-def isNumeric(data):
- """
- true if all elements int or float
-
- Parameters
- data : numeric data list
- """
- if type(data) == list or type(data) == np.ndarray:
- col = pd.Series(data)
- else:
- col = data
- return col.dtype == np.int32 or col.dtype == np.int64 or col.dtype == np.float32 or col.dtype == np.float64
-
-def isInteger(data):
- """
- true if all elements int
-
- Parameters
- data : numeric data list
- """
- if type(data) == list or type(data) == np.ndarray:
- col = pd.Series(data)
- else:
- col = data
- return col.dtype == np.int32 or col.dtype == np.int64
-
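-# note: this redefines the earlier isFloat(val) helper, which returns a (bool, value) tuple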
-def isFloat(data):
- """
- true if all elements float
-
- Parameters
- data : numeric data list
- """
- if type(data) == list or type(data) == np.ndarray:
- col = pd.Series(data)
- else:
- col = data
- return col.dtype == np.float32 or col.dtype == np.float64
-
-def isBinary(data):
- """
- true if all elements either 0 or 1
-
- Parameters
- data : binary data
- """
- re = next((d for d in data if not (type(d) == int and (d == 0 or d == 1))), None)
- return (re is None)
-
-def isCategorical(data):
- """
- true if all elements int or string
-
- Parameters
- data : data value
- """
- re = next((d for d in data if not (type(d) == int or type(d) == str)), None)
- return (re is None)
-
-def assertEqual(value, veq, msg):
- """
- assert equal to
-
- Parameters
- value : value
- veq : value to be equated with
- msg : error msg
- """
- assert value == veq , msg
-
-def assertGreater(value, vmin, msg):
- """
- assert greater than
-
- Parameters
- value : value
- vmin : minimum value
- msg : error msg
- """
- assert value > vmin , msg
-
-def assertGreaterEqual(value, vmin, msg):
- """
-	assert greater than or equal to
-
- Parameters
- value : value
- vmin : minimum value
- msg : error msg
- """
- assert value >= vmin , msg
-
-def assertLesser(value, vmax, msg):
- """
- assert less than
-
- Parameters
- value : value
- vmax : maximum value
- msg : error msg
- """
- assert value < vmax , msg
-
-def assertLesserEqual(value, vmax, msg):
- """
-    assert less than or equal
-
- Parameters
- value : value
- vmax : maximum value
- msg : error msg
- """
- assert value <= vmax , msg
-
-def assertWithinRange(value, vmin, vmax, msg):
- """
- assert within range
-
- Parameters
- value : value
- vmin : minimum value
- vmax : maximum value
- msg : error msg
- """
-    assert vmin <= value <= vmax, msg
-
-def assertInList(value, values, msg):
- """
- assert contains in a list
-
- Parameters
-    value : value to check for inclusion
- values : list data
- msg : error msg
- """
- assert value in values, msg
-
-def maxListDist(l1, l2):
- """
- maximum list element difference between 2 lists
-
- Parameters
- l1 : first list data
- l2 : second list data
- """
-    dist = max(abs(a - b) for a, b in zip(l1, l2))
- return dist
-
-def fileLineCount(fPath):
- """
-    number of lines in a file
-
- Parameters
- fPath : file path
- """
-    i = -1  # handles an empty file, for which the loop body never runs
-    with open(fPath) as f:
-        for i, li in enumerate(f):
-            pass
-    return (i + 1)
-
-def getAlphaNumCharCount(sdata):
- """
-    counts of alphabetic, numeric and other characters in a string
-
- Parameters
- sdata : string data
- """
- acount = 0
- ncount = 0
- scount = 0
- ocount = 0
- assertEqual(type(sdata), str, "input must be string")
- for c in sdata:
- if c.isnumeric():
- ncount += 1
- elif c.isalpha():
- acount += 1
- elif c.isspace():
- scount += 1
- else:
- ocount += 1
-    # note: whitespace (scount) is tallied above but not included in the returned tuple
-    r = (acount, ncount, ocount)
- return r
-
-def genPowerSet(cvalues, incEmpty=False):
- """
-    generates the power set, i.e. all possible subsets
-
- Parameters
- cvalues : list of categorical values
- incEmpty : include empty set if True
- """
- ps = list()
- for cv in cvalues:
- pse = list()
- for s in ps:
- sc = s.copy()
- sc.add(cv)
- #print(sc)
- pse.append(sc)
- ps.extend(pse)
- es = set()
- es.add(cv)
- ps.append(es)
- #print(es)
-
- if incEmpty:
-        ps.append(set())  # an empty set, not {} (which would be an empty dict)
- return ps
-
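-# Illustrative sketch (not part of the original module): subsets are accumulated
-# one element at a time, e.g. genPowerSet(["a", "b"]) -> [{'a'}, {'a', 'b'}, {'b'}]
-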
-class StepFunction:
-    """
-    piecewise constant step function defined over half-open intervals
-    """
- def __init__(self, *values):
- """
-        initializer
-
-        Parameters
-        values : list of tuples, each containing an interval start, an interval end and the corresponding y value
- """
- self.points = values
-
- def find(self, x):
- """
- finds step function value
-
- Parameters
- x : x value
- """
- found = False
- y = 0
- for p in self.points:
- if (x >= p[0] and x < p[1]):
- y = p[2]
- found = True
- break
-
-        if not found:
-            # clamp to the nearest end value when x falls outside every interval
-            l = len(self.points)
-            if (x < self.points[0][0]):
-                y = self.points[0][2]
-            elif (x >= self.points[l-1][1]):
-                y = self.points[l-1][2]
- return y
-
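-# Illustrative sketch (not part of the original module):
-#   sf = StepFunction((0, 5, 1.0), (5, 10, 2.0))
-#   sf.find(3), sf.find(7), sf.find(42)   # -> (1.0, 2.0, 2.0); out-of-range x clamps
-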
-
-class DummyVarGenerator:
- """
- dummy variable generator for categorical variable
- """
- def __init__(self, rowSize, catValues, trueVal, falseVal, delim=None):
- """
-        initializer
-
- Parameters
- rowSize : row size
- catValues : dictionary with field index as key and list of categorical values as value
- trueVal : true value, typically "1"
-        falseVal : false value, typically "0"
-        delim : field delimiter
- """
- self.rowSize = rowSize
- self.catValues = catValues
- numCatVar = len(catValues)
- colCount = 0
- for v in self.catValues.values():
- colCount += len(v)
- self.newRowSize = rowSize - numCatVar + colCount
- #print ("new row size {}".format(self.newRowSize))
- self.trueVal = trueVal
- self.falseVal = falseVal
- self.delim = delim
-
- def processRow(self, row):
- """
-        encodes categorical variables, returning a delimiter-separated string or a list
-
- Parameters
-        row : input row, either a delimiter-separated string or a list
- """
- if self.delim is not None:
- rowArr = row.split(self.delim)
- msg = "row does not have expected number of columns found " + str(len(rowArr)) + " expected " + str(self.rowSize)
- assert len(rowArr) == self.rowSize, msg
- else:
- rowArr = row
-
- newRowArr = []
- for i in range(len(rowArr)):
- curVal = rowArr[i]
- if (i in self.catValues):
- values = self.catValues[i]
- for val in values:
- if val == curVal:
- newVal = self.trueVal
- else:
- newVal = self.falseVal
- newRowArr.append(newVal)
- else:
- newRowArr.append(curVal)
- assert len(newRowArr) == self.newRowSize, "invalid new row size " + str(len(newRowArr)) + " expected " + str(self.newRowSize)
- encRow = self.delim.join(newRowArr) if self.delim is not None else newRowArr
- return encRow
-
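-# Illustrative sketch (not part of the original module): one-hot encode column 1,
-# which takes the values "red", "green" or "blue":
-#   gen = DummyVarGenerator(3, {1: ["red", "green", "blue"]}, "1", "0", delim=",")
-#   gen.processRow("5.2,green,9")   # -> '5.2,0,1,0,9'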
-
diff --git a/spaces/TusharNautiyal/Music-Genre-Classification/app.py b/spaces/TusharNautiyal/Music-Genre-Classification/app.py
deleted file mode 100644
index 67dbe2f65e71e1204585caed0096839fcd039c03..0000000000000000000000000000000000000000
--- a/spaces/TusharNautiyal/Music-Genre-Classification/app.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import numpy as np
-import librosa
-import math
-from tensorflow import keras
-import streamlit as st
-import time
-
-def get_mfcc(audio_signal, num_mfcc=13, n_fft=2048, hop_length=512, num_segments=5):
-    # Extract MFCC features from the audio signal, one segment at a time.
- new_data = {
- "mfcc": []
- }
-
- SAMPLE_RATE = 22050
-    signal, sample_rate = librosa.load(audio_signal, sr=SAMPLE_RATE)
-    TRACK_DURATION = int(librosa.get_duration(y=signal))  # measured in seconds
- SAMPLES_PER_TRACK = SAMPLE_RATE * TRACK_DURATION
- samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)
- num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)
-
- for d in range(num_segments):
- # calculate start and finish sample for current segment
- start = samples_per_segment * d
- finish = start + samples_per_segment
- # extract mfcc
-        mfcc = librosa.feature.mfcc(y=signal[start:finish], sr=sample_rate, n_mfcc=num_mfcc, n_fft=n_fft, hop_length=hop_length)
- mfcc = mfcc.T
- # store only mfcc feature with expected number of vectors
- if len(mfcc) == num_mfcc_vectors_per_segment:
- new_data["mfcc"].append(mfcc.tolist())
-
- return new_data["mfcc"]
-
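-# Shape sketch, assuming the defaults above: a 30 s clip at 22050 Hz gives 5
-# segments of 132300 samples, each yielding ceil(132300 / 512) = 259 frames of
-# 13 MFCCs, so np.array(result) has shape (5, 259, 13).
-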
-def prediction(mfcc):
-    # This function returns a prediction label from our CNN model.
-    cnn_model = keras.models.load_model('music-gen-clasiify-v1.h5')
-    mfcc = np.array(mfcc)
-    mfcc = mfcc[..., np.newaxis]
-    prediction = cnn_model.predict(mfcc)
-    # note: this returns the highest class index seen across segments, not a vote
-    return max(np.argmax(prediction, axis=1))
-
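-# A hypothetical alternative (sketch, not used by the app): a majority vote over
-# the per-segment labels is usually more robust than taking their maximum index.
-#   def majority_vote(probs):
-#       labels = np.argmax(probs, axis=1)
-#       return int(np.bincount(labels).argmax())
-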
-def get_genre(prediction):
-    # Map the numeric class label to its genre name.
-    genres = {
-        0: 'Blues', 1: 'Classical', 2: 'Country', 3: 'Disco', 4: 'Hip Hop',
-        5: 'Jazz', 6: 'Metal', 7: 'Pop', 8: 'Reggae', 9: 'Rock',
-    }
-    return genres.get(prediction, '')
-
-
-def main():
-    # A few instructions:
-    # The music sample should not exceed 30 sec.
-    # 0 -> Blues, 1 -> Classical, 2 -> Country, 3 -> Disco, 4 -> Hip Hop, 5 -> Jazz, 6 -> Metal, 7 -> Pop, 8 -> Reggae, 9 -> Rock
-    # Right now only 10 genres are supported, as we used the GTZAN dataset for music genre classification.
- st.set_page_config(layout='wide',page_title='Genre Classification',page_icon='🎵')
-    st.title('Music Genre Classification With CNN')
-    st.markdown('We use the **GTZAN** dataset, a very popular dataset for audio classification. The uploaded audio sample should be less than **30sec** long and in **.WAV** format; for best results, provide a 30 sec section with the strongest **elemental** or **instrumental ensemble**. If you want to test the model, select ***Untrained Samples***. The model currently supports only 10 genres: blues, classical, country, disco, hiphop, jazz, metal, pop, reggae and rock. A project by Tushar Nautiyal')
- selected_item = st.selectbox('Select Either Uploaded Samples or Upload your own',['Untrained Samples','Upload'])
-    # After this selection we either upload a file or use an untrained sample.
- if selected_item is not None:
- if selected_item == 'Upload':
- files = st.file_uploader('Select .WAV File with maximum 30sec Time', type='wav', accept_multiple_files=False)
-
- if files is not None:
-                audio, sr = librosa.load(files, sr=22050)
-                duration = int(librosa.get_duration(y=audio))
- if 'file_uploaded' not in st.session_state:
- st.session_state['file_uploaded'] = True
-
-                if duration > 30:
-                    st.session_state['file_uploaded'] = False
-                    st.write('Please reupload the file: it exceeds the 30 sec time limit')
-                    bar = st.progress(0)
-                    for percent_complete in range(100):
-                        time.sleep(0.01)
-                        bar.progress(percent_complete + 1)
-
-                elif st.session_state['file_uploaded']:
- st.audio(files, format="audio/wav", start_time=0)
-
-
- elif selected_item == 'Untrained Samples':
- selected_file = st.selectbox("Select A Sample", ['Blues','Jazz','Country','Classical','Hiphop','Metal','Pop','Reggae','Rock'])
- files = f'{selected_file}.wav'
- st.audio(files, format="audio/wav", start_time=0)
- submitted = st.button("Submit")
-
- if submitted:
-        with st.spinner('The model is trying to predict your genre! Wait for it...'):
- signal = files
- mfcc_for_track = get_mfcc(signal)
-
- # After getting mfcc lets use our model to predict
- predict = prediction(mfcc_for_track)
- genre = get_genre(int(predict))
-            st.success("Yes, it's done and here is the answer!")
- st.markdown(f'The Genre for your music is 🎵 : **{genre}** Music')
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff --git a/spaces/Vasanthgx/Pet_Classifier_vasanth/README.md b/spaces/Vasanthgx/Pet_Classifier_vasanth/README.md
deleted file mode 100644
index bfa8f8c0c6a7562aafce2facdfb84cfe5cf8f228..0000000000000000000000000000000000000000
--- a/spaces/Vasanthgx/Pet_Classifier_vasanth/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Pet Classifier Vasanth
-emoji: 👁
-colorFrom: red
-colorTo: purple
-sdk: gradio
-sdk_version: 3.24.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Wing0820/Real-CUGAN/upcunet_v3.py b/spaces/Wing0820/Real-CUGAN/upcunet_v3.py
deleted file mode 100644
index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000
--- a/spaces/Wing0820/Real-CUGAN/upcunet_v3.py
+++ /dev/null
@@ -1,714 +0,0 @@
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-import os, sys
-import numpy as np
-
-root_path = os.path.abspath('.')
-sys.path.append(root_path)
-
-
-class SEBlock(nn.Module):
- def __init__(self, in_channels, reduction=8, bias=False):
- super(SEBlock, self).__init__()
- self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias)
- self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias)
-
- def forward(self, x):
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half()
- else:
- x0 = torch.mean(x, dim=(2, 3), keepdim=True)
- x0 = self.conv1(x0)
- x0 = F.relu(x0, inplace=True)
- x0 = self.conv2(x0)
- x0 = torch.sigmoid(x0)
- x = torch.mul(x, x0)
- return x
-
- def forward_mean(self, x, x0):
- x0 = self.conv1(x0)
- x0 = F.relu(x0, inplace=True)
- x0 = self.conv2(x0)
- x0 = torch.sigmoid(x0)
- x = torch.mul(x, x0)
- return x
-
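-# forward_mean mirrors forward but takes a precomputed channel mean, so the tiled
-# passes in the UpCunet*x classes below can share SE statistics computed over the
-# whole image rather than per tile.
-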
-
-class UNetConv(nn.Module):
- def __init__(self, in_channels, mid_channels, out_channels, se):
- super(UNetConv, self).__init__()
- self.conv = nn.Sequential(
- nn.Conv2d(in_channels, mid_channels, 3, 1, 0),
- nn.LeakyReLU(0.1, inplace=True),
- nn.Conv2d(mid_channels, out_channels, 3, 1, 0),
- nn.LeakyReLU(0.1, inplace=True),
- )
- if se:
- self.seblock = SEBlock(out_channels, reduction=8, bias=True)
- else:
- self.seblock = None
-
- def forward(self, x):
- z = self.conv(x)
- if self.seblock is not None:
- z = self.seblock(z)
- return z
-
-
-class UNet1(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet1, self).__init__()
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 128, 64, se=True)
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
- def forward_a(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x1, x2):
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
-
-class UNet1x3(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet1x3, self).__init__()
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 128, 64, se=True)
- self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv3 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
- def forward_a(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
- def forward_b(self, x1, x2):
- x2 = self.conv2_up(x2)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-4, -4, -4, -4))
- x3 = self.conv3(x1 + x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- z = self.conv_bottom(x3)
- return z
-
-
-class UNet2(nn.Module):
- def __init__(self, in_channels, out_channels, deconv):
- super(UNet2, self).__init__()
-
- self.conv1 = UNetConv(in_channels, 32, 64, se=False)
- self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0)
- self.conv2 = UNetConv(64, 64, 128, se=True)
- self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0)
- self.conv3 = UNetConv(128, 256, 128, se=True)
- self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0)
- self.conv4 = UNetConv(128, 64, 64, se=True)
- self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0)
- self.conv5 = nn.Conv2d(64, 64, 3, 1, 0)
-
- if deconv:
- self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3)
- else:
- self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0)
-
- for m in self.modules():
- if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
- elif isinstance(m, nn.Linear):
- nn.init.normal_(m.weight, 0, 0.01)
- if m.bias is not None:
- nn.init.constant_(m.bias, 0)
-
- def forward(self, x):
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2(x2)
-
- x3 = self.conv2_down(x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- x3 = self.conv3(x3)
- x3 = self.conv3_up(x3)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
- x2 = F.pad(x2, (-4, -4, -4, -4))
- x4 = self.conv4(x2 + x3)
- x4 = self.conv4_up(x4)
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-16, -16, -16, -16))
- x5 = self.conv5(x1 + x4)
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
- z = self.conv_bottom(x5)
- return z
-
-    def forward_a(self, x):  # conv2/3/4 each end with an SE block
- x1 = self.conv1(x)
- x2 = self.conv1_down(x1)
- x2 = F.leaky_relu(x2, 0.1, inplace=True)
- x2 = self.conv2.conv(x2)
- return x1, x2
-
-    def forward_b(self, x2):  # conv2/3/4 each end with an SE block
- x3 = self.conv2_down(x2)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
- x3 = self.conv3.conv(x3)
- return x3
-
-    def forward_c(self, x2, x3):  # conv2/3/4 each end with an SE block
- x3 = self.conv3_up(x3)
- x3 = F.leaky_relu(x3, 0.1, inplace=True)
-
- x2 = F.pad(x2, (-4, -4, -4, -4))
- x4 = self.conv4.conv(x2 + x3)
- return x4
-
-    def forward_d(self, x1, x4):  # conv2/3/4 each end with an SE block
- x4 = self.conv4_up(x4)
- x4 = F.leaky_relu(x4, 0.1, inplace=True)
-
- x1 = F.pad(x1, (-16, -16, -16, -16))
- x5 = self.conv5(x1 + x4)
- x5 = F.leaky_relu(x5, 0.1, inplace=True)
-
- z = self.conv_bottom(x5)
- return z
-
-
-class UpCunet2x(nn.Module):  # perfect tiling, lossless end to end
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet2x, self).__init__()
- self.unet1 = UNet1(in_channels, out_channels, deconv=True)
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-    def forward(self, x, tile_mode):  # ~1.7G VRAM
-        n, c, h0, w0 = x.shape
-        if (tile_mode == 0):  # no tiling
-            ph = ((h0 - 1) // 2 + 1) * 2
-            pw = ((w0 - 1) // 2 + 1) * 2
-            x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')  # pad so h and w are divisible by 2
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2]
- return x
-        elif (tile_mode == 1):  # halve the longer side
-            if (w0 >= h0):
-                crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # divisible by 2 after halving, so first make divisible by 4
-                crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-            else:
-                crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # divisible by 2 after halving, so first make divisible by 4
-                crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-            crop_size = (crop_size_h, crop_size_w)  # ~6.6G VRAM
-        elif (tile_mode == 2):  # halve both h and w
-            crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # ~5.6G VRAM
-        elif (tile_mode == 3):  # one third of both h and w
-            crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # ~4.2G VRAM
-        elif (tile_mode == 4):  # one quarter of both h and w
-            crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # ~3.7G VRAM
- crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 36, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 36, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 36, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 36, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
- opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 36, crop_size[0]):
- for j in range(0, w - 36, crop_size[1]):
- res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2]
-        return res
-
-
-class UpCunet3x(nn.Module):  # perfect tiling, lossless end to end
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet3x, self).__init__()
- self.unet1 = UNet1x3(in_channels, out_channels, deconv=True)
- self.unet2 = UNet2(in_channels, out_channels, deconv=False)
-
-    def forward(self, x, tile_mode):  # ~1.7G VRAM
-        n, c, h0, w0 = x.shape
-        if (tile_mode == 0):  # no tiling
-            ph = ((h0 - 1) // 4 + 1) * 4
-            pw = ((w0 - 1) // 4 + 1) * 4
-            x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')  # pad so h and w are divisible by 4
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3]
- return x
-        elif (tile_mode == 1):  # halve the longer side
-            if (w0 >= h0):
-                crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2  # divisible by 4 after halving, so first make divisible by 8
-                crop_size_h = (h0 - 1) // 4 * 4 + 4  # divisible by 4
-            else:
-                crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2  # divisible by 4 after halving, so first make divisible by 8
-                crop_size_w = (w0 - 1) // 4 * 4 + 4  # divisible by 4
-            crop_size = (crop_size_h, crop_size_w)  # ~6.6G VRAM
-        elif (tile_mode == 2):  # halve both h and w
-            crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2)  # ~5.6G VRAM
-        elif (tile_mode == 3):  # one third of both h and w
-            crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3)  # ~4.2G VRAM
-        elif (tile_mode == 4):  # one quarter of both h and w
-            crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4)  # ~3.7G VRAM
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 28, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 28, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 28, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 28, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
-                opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 28, crop_size[0]):
- for j in range(0, w - 28, crop_size[1]):
- res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3]
- return res
-
-
-class UpCunet4x(nn.Module):  # perfect tiling, lossless end to end
- def __init__(self, in_channels=3, out_channels=3):
- super(UpCunet4x, self).__init__()
- self.unet1 = UNet1(in_channels, 64, deconv=True)
- self.unet2 = UNet2(64, 64, deconv=False)
- self.ps = nn.PixelShuffle(2)
- self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True)
-
- def forward(self, x, tile_mode):
- n, c, h0, w0 = x.shape
- x00 = x
-        if (tile_mode == 0):  # no tiling
-            ph = ((h0 - 1) // 2 + 1) * 2
-            pw = ((w0 - 1) // 2 + 1) * 2
-            x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')  # pad so h and w are divisible by 2
- x = self.unet1.forward(x)
- x0 = self.unet2.forward(x)
- x1 = F.pad(x, (-20, -20, -20, -20))
- x = torch.add(x0, x1)
- x = self.conv_final(x)
- x = F.pad(x, (-1, -1, -1, -1))
- x = self.ps(x)
- if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4]
- x += F.interpolate(x00, scale_factor=4, mode='nearest')
- return x
-        elif (tile_mode == 1):  # halve the longer side
-            if (w0 >= h0):
-                crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2  # divisible by 2 after halving, so first make divisible by 4
-                crop_size_h = (h0 - 1) // 2 * 2 + 2  # divisible by 2
-            else:
-                crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2  # divisible by 2 after halving, so first make divisible by 4
-                crop_size_w = (w0 - 1) // 2 * 2 + 2  # divisible by 2
-            crop_size = (crop_size_h, crop_size_w)  # ~6.6G VRAM
-        elif (tile_mode == 2):  # halve both h and w
-            crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2)  # ~5.6G VRAM
-        elif (tile_mode == 3):  # one third of both h and w
-            crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3)  # ~4.1G VRAM
-        elif (tile_mode == 4):  # one quarter of both h and w
-            crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4)  # ~3.7G VRAM
- ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0]
- pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1]
- x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect')
- n, c, h, w = x.shape
- se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device)
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- n_patch = 0
- tmp_dict = {}
- opt_res_dict = {}
- for i in range(0, h - 38, crop_size[0]):
- tmp_dict[i] = {}
- for j in range(0, w - 38, crop_size[1]):
- x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38]
- n, c1, h1, w1 = x_crop.shape
- tmp0, x_crop = self.unet1.forward_a(x_crop)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- n_patch += 1
- tmp_dict[i][j] = (tmp0, x_crop)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- tmp0, x_crop = tmp_dict[i][j]
- x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0)
- opt_unet1 = self.unet1.forward_b(tmp0, x_crop)
- tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2)
- se_mean1 /= n_patch
- se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean0 = se_mean0.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j]
- tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1)
- tmp_x3 = self.unet2.forward_b(tmp_x2)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True)
- se_mean0 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3)
- se_mean0 /= n_patch
- se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64
- if ("Half" in x.type()):
- se_mean1 = se_mean1.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j]
- tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0)
- tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3)
- if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor
- tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half()
- else:
- tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True)
- se_mean1 += tmp_se_mean
- tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4)
- se_mean1 /= n_patch
- for i in range(0, h - 38, crop_size[0]):
- opt_res_dict[i] = {}
- for j in range(0, w - 38, crop_size[1]):
- opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j]
- tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1)
- x0 = self.unet2.forward_d(tmp_x1, tmp_x4)
- x1 = F.pad(opt_unet1, (-20, -20, -20, -20))
-                x_crop = torch.add(x0, x1)  # x0 is the final output of unet2
- x_crop = self.conv_final(x_crop)
- x_crop = F.pad(x_crop, (-1, -1, -1, -1))
- x_crop = self.ps(x_crop)
- opt_res_dict[i][j] = x_crop
- del tmp_dict
- torch.cuda.empty_cache()
- res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
- if ("Half" in x.type()):
- res = res.half()
- for i in range(0, h - 38, crop_size[0]):
- for j in range(0, w - 38, crop_size[1]):
- # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
- res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
- del opt_res_dict
- torch.cuda.empty_cache()
- if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
- res += F.interpolate(x00, scale_factor=4, mode='nearest')
-        return res
-
-
-class RealWaifuUpScaler(object):
- def __init__(self, scale, weight_path, half, device):
- weight = torch.load(weight_path, map_location="cpu")
- self.model = eval("UpCunet%sx" % scale)()
-        if half:
-            self.model = self.model.half().to(device)
-        else:
-            self.model = self.model.to(device)
- self.model.load_state_dict(weight, strict=True)
- self.model.eval()
- self.half = half
- self.device = device
-
- def np2tensor(self, np_frame):
-        if not self.half:
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
- else:
- return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
-
- def tensor2np(self, tensor):
-        if not self.half:
- return (
- np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
- else:
- return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
- (1, 2, 0)))
-
- def __call__(self, frame, tile_mode):
- with torch.no_grad():
- tensor = self.np2tensor(frame)
- result = self.tensor2np(self.model(tensor, tile_mode))
- return result
-
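-# Illustrative usage sketch (weights path as in the __main__ block below):
-#   import cv2
-#   upscaler = RealWaifuUpScaler(2, "weights_v3/up2x-latest-denoise3x.pth", half=False, device="cpu")
-#   frame = cv2.imread("input.png")[:, :, ::-1]     # BGR -> RGB
-#   out = upscaler(frame, tile_mode=0)[:, :, ::-1]  # back to BGR
-#   cv2.imwrite("output.png", out)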
-
-if __name__ == "__main__":
- ###########inference_img
- import time, cv2, sys
- from time import time as ttime
-
- for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
- ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
- for tile_mode in [0, 1, 2, 3, 4]:
-            upscaler = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
- input_dir = "%s/input_dir1" % root_path
- output_dir = "%s/opt-dir-all-test" % root_path
- os.makedirs(output_dir, exist_ok=True)
- for name in os.listdir(input_dir):
- print(name)
- tmp = name.split(".")
- inp_path = os.path.join(input_dir, name)
- suffix = tmp[-1]
- prefix = ".".join(tmp[:-1])
- tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
- print(inp_path, tmp_path)
-                # work around non-ASCII (e.g. Chinese) paths via a temporary link
-                # os.link(inp_path, tmp_path)  # use a hard link on Windows
-                os.symlink(inp_path, tmp_path)  # use a symlink on Linux
- frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
- t0 = ttime()
-                result = upscaler(frame, tile_mode=tile_mode)[:, :, ::-1]
- t1 = ttime()
- print(prefix, "done", t1 - t0)
- tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
- cv2.imwrite(tmp_opt_path, result)
-                n = 0
-                while True:  # find a non-clashing output name
-                    if (n == 0):
-                        suffix = "_%sx_tile%s.png" % (scale, tile_mode)
-                    else:
-                        suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n)
-                    if not os.path.exists(os.path.join(output_dir, prefix + suffix)):
-                        break
-                    else:
-                        n += 1
- final_opt_path = os.path.join(output_dir, prefix + suffix)
- os.rename(tmp_opt_path, final_opt_path)
- os.remove(tmp_path)
diff --git a/spaces/Wrathless/Dkrotzer-MusicalMagic/tests/modules/test_rope.py b/spaces/Wrathless/Dkrotzer-MusicalMagic/tests/modules/test_rope.py
deleted file mode 100644
index b9a54aec8b38a257ba28053afccf305a60691bfc..0000000000000000000000000000000000000000
--- a/spaces/Wrathless/Dkrotzer-MusicalMagic/tests/modules/test_rope.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-
-from audiocraft.modules.rope import RotaryEmbedding
-from audiocraft.modules.transformer import StreamingTransformer
-
-
-def test_rope():
- B, T, H, C = 8, 75, 16, 128
-
- rope = RotaryEmbedding(dim=C)
- xq = torch.rand((B, T, H, C))
- xk = torch.rand((B, T, H, C))
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
-
- assert list(xq_out.shape) == [B, T, H, C]
- assert list(xk_out.shape) == [B, T, H, C]
-
-
-def test_rope_io_dtypes():
- B, T, H, C = 8, 75, 16, 128
-
- rope_32 = RotaryEmbedding(dim=C, dtype=torch.float32)
- rope_64 = RotaryEmbedding(dim=C, dtype=torch.float64)
-
- # Test bfloat16 inputs w/ both 32 and 64 precision rope.
- xq_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
- xk_16 = torch.rand((B, T, H, C)).to(torch.bfloat16)
- xq_out, xk_out = rope_32.rotate_qk(xq_16, xk_16)
- assert xq_out.dtype == torch.bfloat16
- xq_out, xk_out = rope_64.rotate_qk(xq_16, xk_16)
- assert xq_out.dtype == torch.bfloat16
-
- # Test float32 inputs w/ both 32 and 64 precision rope.
- xq_32 = torch.rand((B, T, H, C)).to(torch.float32)
- xk_32 = torch.rand((B, T, H, C)).to(torch.float32)
- xq_out, xk_out = rope_32.rotate_qk(xq_32, xk_32)
- assert xq_out.dtype == torch.float32
- xq_out, xk_out = rope_64.rotate_qk(xq_32, xk_32)
- assert xq_out.dtype == torch.float32
-
-
-def test_transformer_with_rope():
- torch.manual_seed(1234)
- for pos in ['rope', 'sin_rope']:
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
- positional_embedding=pos)
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- out = tr(x)
- assert list(out.shape) == list(x.shape)
-
-
-@torch.no_grad()
-def test_rope_streaming():
- torch.manual_seed(1234)
- tr = StreamingTransformer(
- 16, 4, 2, causal=True, dropout=0.,
- custom=True, positional_embedding='rope')
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- ref = tr(x)
-
- with tr.streaming():
- outs = []
- frame_sizes = [1] * steps
-
- for frame_size in frame_sizes:
- frame = x[:, :frame_size]
- x = x[:, frame_size:]
- outs.append(tr(frame))
-
- out = torch.cat(outs, dim=1)
- assert list(out.shape) == [3, steps, 16]
- delta = torch.norm(out - ref) / torch.norm(out)
- assert delta < 1e-6, delta
-
-
-@torch.no_grad()
-def test_rope_streaming_past_context():
- torch.manual_seed(1234)
-
- for context in [None, 10]:
- tr = StreamingTransformer(
- 16, 4, 1 if context else 2,
- causal=True, past_context=context, custom=True,
- dropout=0., positional_embedding='rope')
- tr.eval()
-
- steps = 20
- x = torch.randn(3, steps, 16)
- ref = tr(x)
-
- with tr.streaming():
- outs = []
- frame_sizes = [1] * steps
-
- for frame_size in frame_sizes:
- frame = x[:, :frame_size]
- x = x[:, frame_size:]
- outs.append(tr(frame))
-
- out = torch.cat(outs, dim=1)
- assert list(out.shape) == [3, steps, 16]
- delta = torch.norm(out - ref) / torch.norm(out)
- assert delta < 1e-6, delta
-
-
-def test_rope_memory_efficient():
- torch.manual_seed(1234)
- tr = StreamingTransformer(
- 16, 4, 2, custom=True, dropout=0., layer_scale=0.1,
- positional_embedding='rope')
- tr_mem_efficient = StreamingTransformer(
- 16, 4, 2, dropout=0., memory_efficient=True, layer_scale=0.1,
- positional_embedding='rope')
- tr_mem_efficient.load_state_dict(tr.state_dict())
- tr.eval()
- steps = 12
- x = torch.randn(3, steps, 16)
-
- with torch.no_grad():
- y = tr(x)
- y2 = tr_mem_efficient(x)
- # Check at float precision b/c this is the rope default.
- assert torch.allclose(y, y2, atol=1e-7), (y - y2).norm()
-
-
-def test_rope_with_xpos():
- B, T, H, C = 8, 75, 16, 128
-
- rope = RotaryEmbedding(dim=C, xpos=True)
- xq = torch.rand((B, T, H, C))
- xk = torch.rand((B, T, H, C))
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
-
- assert list(xq_out.shape) == [B, T, H, C]
- assert list(xk_out.shape) == [B, T, H, C]
-
-
-def test_positional_scale():
- B, T, H, C = 8, 75, 16, 128
-
- rope = RotaryEmbedding(dim=C, xpos=True, scale=0.0)
- xq = torch.rand((B, T, H, C))
- xk = torch.rand((B, T, H, C))
- xq_out, xk_out = rope.rotate_qk(xq, xk, start=7)
-
- assert torch.allclose(xq, xq_out)
- assert torch.allclose(xk, xk_out)
diff --git a/spaces/Writer/instruct-palmyra-20b/app.py b/spaces/Writer/instruct-palmyra-20b/app.py
deleted file mode 100644
index 2ad0a217bf2ef9dc13cf993bef3796e7748caa97..0000000000000000000000000000000000000000
--- a/spaces/Writer/instruct-palmyra-20b/app.py
+++ /dev/null
@@ -1,194 +0,0 @@
-import os
-import gradio as gr
-import requests
-
-theme = gr.themes.Soft(
- primary_hue="indigo",
- secondary_hue="blue",
- neutral_hue="slate",
- radius_size=gr.themes.sizes.radius_sm,
- font=[
- gr.themes.GoogleFont("Open Sans"),
- "ui-sans-serif",
- "system-ui",
- "sans-serif",
- ],
- text_size=gr.themes.sizes.text_lg,
-)
-
-API_URL = os.environ.get("URL")
-api_key = os.environ.get("API_KEY")
-if API_URL is None:
- raise ValueError("Please set the API_URL environment variable")
-if api_key is None:
- raise ValueError("Please set the API_KEY environment variable")
-
-
-headers = {"Authorization": f"Api-Key {api_key}", "Content-Type": "text/plain"}
-
-
-DEFAULT_STREAM_ENCODING = "utf-8"
-prompt_template = (
- "Below is an instruction that describes a task. "
- "Write a response that appropriately completes the request.\n\n"
- "### Instruction:\n{instruction}\n\n### Response:"
-)
-
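-# For example, prompt_template.format(instruction="Say hello.") produces the
-# standard instruction-tuning layout: the preamble, "### Instruction:\nSay hello.",
-# and a trailing "### Response:" for the model to complete.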
-
-def generate(
- prompt,
- temperature=0.9,
- max_new_tokens=256,
- top_p=0.95,
- repetition_penalty=1.0,
-):
- if not prompt:
- return ""
- input_txt = prompt_template.format(instruction=prompt)
- request_body = {
- "prompt": input_txt,
- "temperature": temperature,
- "max_tokens": max_new_tokens,
- "top_p": top_p,
- # "top_k": top_k,
- "frequency_penalty": repetition_penalty,
- }
- # print(request_body)
- try:
- response = requests.post(
- API_URL,
- json=request_body,
- headers=headers,
- stream=True,
- )
-        if response.headers.get("transfer-encoding") == "chunked":
-            output = ""
-            for chunk in response.iter_content(chunk_size=8192, decode_unicode=True):
-                if isinstance(chunk, bytes):
-                    output += chunk.decode(response.encoding or DEFAULT_STREAM_ENCODING)
-                else:
-                    output += chunk
-                yield output
-        else:
-            # non-chunked responses arrive in one piece; previously this value was
-            # silently dropped, since a generator's return value is never yielded
-            yield response.text
- except Exception as e:
- print(e)
- return
-
-
-examples = [
- "Alien Anthropologists - Imagine that an extraterrestrial species has been studying Earth for centuries. Describe their observations of humanity's most significant cultural, political, and technological achievements, along with their analysis on how these developments have shaped the interconnected global society.",
- "Imagine a futuristic world where humanity has discovered a way to harness the power of a distant black hole to solve Earth's ongoing energy crisis. Discuss the ethical implications and potential consequences.",
- "Create a policy framework addressing ethical concerns of merging human intelligence with advanced AI systems.",
- "In Schrödinger's cat paradox, what occurs if an observer looks inside the box?",
- "Write a Python function that checks whether a given input string is a palindrome or not.",
-]
-
-
-def process_example(args):
-    # drain the streaming generator and return its final, complete output
-    output = ""
-    for output in generate(args):
-        pass
-    return output
-
-
-css = ".generating {visibility: hidden}"
-
-monospace_css = """
-#q-input textarea {
- font-family: Poppins,sans-serif, Arial, 'Consolas', Courier, monospace;
-}
-"""
-
-
-css += monospace_css
-
-description = """
-
-"""
-
-with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
- with gr.Column():
- gr.Markdown(description)
- with gr.Row():
- with gr.Column():
- instruction = gr.Textbox(
- placeholder="Enter your prompt here",
- lines=2,
- label="Input",
- elem_id="q-input",
- scale=2,
- )
- submit = gr.Button("Generate", variant="primary")
- output = gr.Textbox(lines=10, label="Output", scale=2)
- with gr.Row():
- with gr.Column():
- with gr.Accordion("Advanced settings", open=False):
- with gr.Row():
- column_1, column_2 = gr.Column(), gr.Column()
- with column_1:
- temperature = gr.Slider(
- label="Temperature",
- value=0.9,
- minimum=0.1,
- maximum=1.0,
- step=0.05,
- interactive=True,
- info="Higher values produce more diverse outputs",
- )
- max_new_tokens = gr.Slider(
- label="Max new tokens",
- value=256,
- minimum=0,
- maximum=2048,
- step=64,
- interactive=True,
- info="The maximum numbers of new tokens",
- )
- with column_2:
- top_p = gr.Slider(
- label="Top-p (nucleus sampling)",
- value=0.95,
- minimum=0.1,
- maximum=1,
- step=0.05,
- interactive=True,
- info="Higher values sample more low-probability tokens",
- )
- repetition_penalty = gr.Slider(
- label="Repetition penalty",
- value=1.0,
- minimum=1.0,
- maximum=2.0,
- step=0.05,
- interactive=True,
- info="Penalize repeated tokens",
- )
-
- gr.Examples(
- examples=examples,
- inputs=[instruction],
- cache_examples=False,
- fn=process_example,
- outputs=[output],
- )
-
- submit.click(
- generate,
- inputs=[
- instruction,
- temperature,
- max_new_tokens,
- top_p,
- repetition_penalty,
- ],
- outputs=[output],
- )
-demo.queue(concurrency_count=16).launch(debug=True)
diff --git a/spaces/Xinyoumeng233hu/SteganographywithGPT-2/run_single.py b/spaces/Xinyoumeng233hu/SteganographywithGPT-2/run_single.py
deleted file mode 100644
index 17421e6b19c603e4316879e764787f489a00b092..0000000000000000000000000000000000000000
--- a/spaces/Xinyoumeng233hu/SteganographywithGPT-2/run_single.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import numpy as np
-import bitarray
-import sys
-import re
-import math
-from meteor import encode_meteor,decode_meteor
-from utils import get_model, encode_context
-from arithmetic import encode_arithmetic, decode_arithmetic
-from block_baseline import get_bins, encode_block, decode_block
-from huffman_baseline import encode_huffman, decode_huffman
-from sample import sample
-
-def encode_message(mode, message_str, context):
- enc, model = get_model(model_name='gpt2')
- ## PARAMETERS
- # message_str = input("input secret message:")
- unicode_enc = False
- # mode = 'meteor'
- # mode = input("Please enter mode (arithmetic, huffman, bins, or sample): ")
- block_size = 3 # for huffman and bins
- temp = 0.9 # for arithmetic
- precision = 26 # for arithmetic
- sample_tokens = 100 # for sample
- topk = 300
- finish_sent=True # whether or not to force finish sent. If so, stats displayed will be for non-finished sentence
- meteor_sort = False
- meteor_random = False
-
- key = b'0x01'*64
- sample_seed_prefix = b'sample'
- nonce = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-
- ## VALIDATE PARAMETERS
- if mode not in ['meteor', 'arithmetic', 'huffman', 'bins']:
- raise NotImplementedError
-
- if mode == 'bins':
- bin2words, words2bin = get_bins(len(enc.encoder), block_size)
-
-# context = \
-# """Washington received his initial military training and command with the Virginia Regiment during the French and Indian War. He was later elected to the Virginia House of Burgesses and was named a delegate to the Continental Congress, where he was appointed Commanding General of the nation's Continental Army. Washington led American forces, allied with France, in the defeat of the British at Yorktown. Once victory for the United States was in hand in 1783, Washington resigned his commission.
-# """
- # context = "Despite a long history of research and wide-spread applications to censorship resistant systems, practical steganographic systems capable of embedding messages into realistic communication distributions, like text, do not exist." #@param ["Washington received his initial military training and command with the Virginia Regiment during the French and Indian War. He was later elected to the Virginia House of Burgesses and was named a delegate to the Continental Congress, where he was appointed Commanding General of the nation's Continental Army. Washington led American forces, allied with France, in the defeat of the British at Yorktown. Once victory for the United States was in hand in 1783, Washington resigned his commission.", "The Alvarez hypothesis posits that the mass extinction of the dinosaurs and many other living things during the Cretaceous-Paleogene extinction event was caused by the impact of a large asteroid on the Earth. Prior to 2013, it was commonly cited as having happened about 65 million years ago, but Renne and colleagues (2013) gave an updated value of 66 million years. Evidence indicates that the asteroid fell in the Yucatan Peninsula, at Chicxulub, Mexico. The hypothesis is named after the father-and-son team of scientists Luis and Walter Alvarez, who first suggested it in 1980. Shortly afterwards, and independently, the same was suggested by Dutch paleontologist Jan Smit.", "Despite a long history of research and wide-spread applications to censorship resistant systems, practical steganographic systems capable of embedding messages into realistic communication distributions, like text, do not exist."] {allow-input: true}
- context_tokens = encode_context(context, enc)
- # ------------------------------------------------------------------------------------
- # ------------------------------------------------------------------------------------
- # First encode message to uniform bits, without any context
- # (not essential this is arithmetic vs ascii, but it's more efficient when the message is natural language)
- if unicode_enc:
- ba = bitarray.bitarray()
- ba.frombytes(message_str.encode('utf-8'))
- message = ba.tolist()
- else:
-        message_ctx = [enc.encoder['<|endoftext|>']]
-        message = decode_arithmetic(model, enc, message_str, message_ctx, precision=40, topk=60000)
- # Next encode bits into cover text, using arbitrary context
- Hq = 0
- if mode == 'arithmetic':
- out, nll, kl, words_per_bit = encode_arithmetic(model, enc, message, context_tokens, temp=temp, finish_sent=finish_sent, precision=precision, topk=topk)
- elif mode == 'huffman':
- out, nll, kl, words_per_bit = encode_huffman(model, enc, message, context_tokens, block_size, finish_sent=finish_sent)
- elif mode == 'bins':
- out, nll, kl, words_per_bit = encode_block(model, enc, message, context_tokens, block_size, bin2words, words2bin, finish_sent=finish_sent)
- elif mode == 'meteor':
- out, nll, kl, words_per_bit = encode_meteor(model, enc, message, context_tokens, temp=temp, finish_sent=finish_sent,
- precision=precision, topk=topk, is_sort=meteor_sort, randomize_key=meteor_random, input_key=key, input_nonce=nonce)
- elif mode == 'sample':
- out, nll, kl, Hq = sample(model, enc, sample_tokens, context_tokens, temperature=temp, topk=topk)
- words_per_bit = 1
- text = enc.decode(out)
-
- # print(message)
- # print(len(message))
- print("="*40 + " Encoding " + "="*40)
- print(text)
- print('ppl: %0.2f, kl: %0.3f, words/bit: %0.2f, bits/word: %0.2f, entropy: %.2f' % (math.exp(nll), kl, words_per_bit, 1/words_per_bit, Hq/0.69315))
-
- stats = {
- "ppl": math.exp(nll),
- "kl": kl,
- "wordsbit": words_per_bit,
-        "entropy": Hq/0.69315  # divide by ln(2) to convert nats to bits
- }
- # return text, stats
- return text,stats["ppl"], stats["kl"], stats["wordsbit"]
-
-def decode_message(mode, text, context):
- enc, model = get_model(model_name='gpt2')
- ## PARAMETERS
- unicode_enc = False
- # mode = 'meteor'
- block_size = 3 # for huffman and bins
- temp = 0.9 # for arithmetic
- precision = 26 # for arithmetic
- sample_tokens = 100 # for sample
- topk = 300
- finish_sent=True # whether or not to force finish sent. If so, stats displayed will be for non-finished sentence
- meteor_sort = False
- meteor_random = False
-
- key = b'0x01'*64
- sample_seed_prefix = b'sample'
- nonce = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
-
- ## VALIDATE PARAMETERS
- if mode not in ['meteor', 'arithmetic', 'huffman', 'bins', 'sample']:
- raise NotImplementedError
- if mode == 'bins':
- bin2words, words2bin = get_bins(len(enc.encoder), block_size)
-
- context_tokens = encode_context(context, enc)
-
- if mode != 'sample':
- if mode == 'arithmetic':
- message_rec = decode_arithmetic(model, enc, text, context_tokens, temp=temp, precision=precision, topk=topk)
- elif mode == 'huffman':
- message_rec = decode_huffman(model, enc, text, context_tokens, block_size)
- elif mode == 'bins':
- message_rec = decode_block(model, enc, text, context_tokens, block_size, bin2words, words2bin)
- elif mode == 'meteor':
- message_rec = decode_meteor(model, enc, text, context_tokens, temp=temp,
- precision=precision, topk=topk, is_sort=meteor_sort, input_key=key, input_nonce=nonce)
-
- print("="*35 + " Recovered Message " + "="*35)
- # print(message_rec)
- # print("=" * 80)
- # Finally map message bits back to original text
- if unicode_enc:
- message_rec = [bool(item) for item in message_rec]
- ba = bitarray.bitarray(message_rec)
- reconst = ba.tobytes().decode('utf-8', 'ignore')
- else:
- message_ctx = [enc.encoder['<|endoftext|>']]
- reconst = encode_arithmetic(model, enc, message_rec, message_ctx, precision=40, topk=60000)
- reconst = enc.decode(reconst[0])
- print(reconst[:-5])
- print("=" * 80)
- return reconst[:-5]
-
-# def main():
-# chosen_context = "Despite a long history of research and wide-spread applications to censorship resistant systems, practical steganographic systems capable of embedding messages into realistic communication distributions, like text, do not exist." #@param ["Washington received his initial military training and command with the Virginia Regiment during the French and Indian War. He was later elected to the Virginia House of Burgesses and was named a delegate to the Continental Congress, where he was appointed Commanding General of the nation's Continental Army. Washington led American forces, allied with France, in the defeat of the British at Yorktown. Once victory for the United States was in hand in 1783, Washington resigned his commission.", "The Alvarez hypothesis posits that the mass extinction of the dinosaurs and many other living things during the Cretaceous-Paleogene extinction event was caused by the impact of a large asteroid on the Earth. Prior to 2013, it was commonly cited as having happened about 65 million years ago, but Renne and colleagues (2013) gave an updated value of 66 million years. Evidence indicates that the asteroid fell in the Yucatan Peninsula, at Chicxulub, Mexico. The hypothesis is named after the father-and-son team of scientists Luis and Walter Alvarez, who first suggested it in 1980. Shortly afterwards, and independently, the same was suggested by Dutch paleontologist Jan Smit.", "Despite a long history of research and wide-spread applications to censorship resistant systems, practical steganographic systems capable of embedding messages into realistic communication distributions, like text, do not exist."] {allow-input: true}
-# # #@title { run: "auto", display-mode: "form" }
-# message_text = "generate text!" #@param {type:"string"}
-# mode = input("Please enter mode (meteor, arithmetic, huffman, bins, or sample): ")
-# #@title Run me!
-# #@markdown Make sure to re-run this cell if you change the parameters above.
-# x = encode_message(mode, message_text, chosen_context)
-# # print(x[0])
-# y = decode_message(mode, x[0], chosen_context)
-
-# if __name__ == '__main__':
-# main()
-
\ No newline at end of file
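The commented-out cells above drive a steganographic round trip: `encode_message(mode, message_text, chosen_context)` hides the message in generated text, and `decode_message(mode, x[0], chosen_context)` recovers it, with `mode` selecting one of meteor, arithmetic, huffman, bins, or sample. Of these, "bins" is the easiest to illustrate; the toy below sketches only that idea (a minimal example of my own, not the notebook's implementation): the vocabulary is partitioned into 2**K bins, and each emitted token encodes K message bits via its bin index.

```python
# Toy "bins" embedding: split a vocabulary into 2**K bins; each emitted
# token carries K message bits through its bin index. A sketch of the
# idea only -- not the notebook's encode_message/decode_message.
K = 2  # bits carried per token
VOCAB = ["alpha", "bravo", "charlie", "delta",
         "echo", "foxtrot", "golf", "hotel"]
BINS = [VOCAB[i::2 ** K] for i in range(2 ** K)]   # round-robin partition
BIN_OF = {tok: i % (2 ** K) for i, tok in enumerate(VOCAB)}

def embed(bits):
    """Map each K-bit chunk to a token from the corresponding bin."""
    assert len(bits) % K == 0
    return [BINS[int(bits[i:i + K], 2)][0] for i in range(0, len(bits), K)]

def extract(tokens):
    """Recover the bitstring from each token's bin index."""
    return "".join(format(BIN_OF[t], "0{}b".format(K)) for t in tokens)

cover = embed("0110")              # -> ['bravo', 'charlie']
assert extract(cover) == "0110"    # round trip succeeds
```

In a real system, each token would additionally be sampled from the language model's distribution restricted to the chosen bin, which is what ties embedding capacity to the model's entropy.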
diff --git a/spaces/XzJosh/Jianmo-Bert-VITS2/text/english_bert_mock.py b/spaces/XzJosh/Jianmo-Bert-VITS2/text/english_bert_mock.py
deleted file mode 100644
index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/Jianmo-Bert-VITS2/text/english_bert_mock.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import torch
-
-
-def get_bert_feature(norm_text, word2ph):
- return torch.zeros(1024, sum(word2ph))
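The deleted `english_bert_mock.py` above is a stand-in for a real English BERT feature extractor: it returns an all-zero matrix with one 1024-dimensional column per phone, where `word2ph` lists how many phones each word expands to. A minimal check of that shape contract (assuming only that `torch` is installed):

```python
import torch

def get_bert_feature(norm_text, word2ph):
    # Mock: zero features, one 1024-dim column per phone.
    return torch.zeros(1024, sum(word2ph))

feat = get_bert_feature("hello world", word2ph=[2, 3])
assert feat.shape == (1024, 5)   # 1024 feature dims x (2 + 3) phones
```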
diff --git a/spaces/XzJosh/LittleTaffy-Bert-VITS2/text/cleaner.py b/spaces/XzJosh/LittleTaffy-Bert-VITS2/text/cleaner.py
deleted file mode 100644
index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000
--- a/spaces/XzJosh/LittleTaffy-Bert-VITS2/text/cleaner.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from text import chinese, cleaned_text_to_sequence
-
-
-language_module_map = {
- 'ZH': chinese
-}
-
-
-def clean_text(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- return norm_text, phones, tones, word2ph
-
-def clean_text_bert(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- bert = language_module.get_bert_feature(norm_text, word2ph)
- return phones, tones, bert
-
-def text_to_sequence(text, language):
- norm_text, phones, tones, word2ph = clean_text(text, language)
- return cleaned_text_to_sequence(phones, tones, language)
-
-if __name__ == '__main__':
- pass
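The deleted `cleaner.py` above is a thin dispatcher: each value in `language_module_map` must expose `text_normalize(text) -> str` and `g2p(norm_text) -> (phones, tones, word2ph)`, plus `get_bert_feature` for the BERT path. A runnable sketch of that protocol, with a dummy module standing in for `text.chinese` (whose internals are not shown here):

```python
# Minimal sketch of the language-module protocol cleaner.py relies on.
# "dummy" stands in for text.chinese, which is not shown in this diff.
from types import SimpleNamespace

dummy = SimpleNamespace(
    text_normalize=lambda text: text.lower().strip(),
    g2p=lambda norm: (list(norm), [0] * len(norm), [1] * len(norm)),
)

language_module_map = {"XX": dummy}

def clean_text(text, language):
    module = language_module_map[language]
    norm_text = module.text_normalize(text)
    phones, tones, word2ph = module.g2p(norm_text)
    return norm_text, phones, tones, word2ph

print(clean_text("  Hello ", "XX"))  # ('hello', phones, tones, word2ph)
```

Adding a language is then just registering another module with the same two (or three) attributes under a new key.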
diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py
deleted file mode 100644
index 7374e6968bb006f5d8c49e75d9d3b31ea3d77d05..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/data/datasets/lvis_v1_categories.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Autogen with
-# with open("lvis_v1_val.json", "r") as f:
-# a = json.load(f)
-# c = a["categories"]
-# for x in c:
-# del x["image_count"]
-# del x["instance_count"]
-# LVIS_CATEGORIES = repr(c) + " # noqa"
-# with open("/tmp/lvis_categories.py", "wt") as f:
-# f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}")
-# Then paste the contents of that file below
-
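Assembled into a runnable script, the autogen recipe in the comment above needs only its missing `import json` (file paths are taken verbatim from the comment, so `lvis_v1_val.json` is assumed to sit in the working directory):

```python
import json

# Strip the per-category count fields and dump the rest as a Python literal.
with open("lvis_v1_val.json", "r") as f:
    a = json.load(f)
c = a["categories"]
for x in c:
    del x["image_count"]
    del x["instance_count"]
LVIS_CATEGORIES = repr(c) + " # noqa"
with open("/tmp/lvis_categories.py", "wt") as f:
    f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}")
```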
-# fmt: off
-LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or 
cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 
'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 
'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': ['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 
'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 
'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 
'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of 
bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 
'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 
'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying 
water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 
'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 
'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 
'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 
'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 
'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': 
['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, 
{'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, 
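# Schema of each entry in this LVIS-style category list: 'frequency' encodes how
# often the category occurs in the training data ('f' = frequent, 'c' = common,
# 'r' = rare, in LVIS terms), 'synset' is the WordNet synset key, 'synonyms'
# lists accepted aliases, 'id' is the unique integer category id, 'def' is a
# short gloss, and 'name' is the canonical label string. For instance, the
# entry with id 370 above ties the detector label 'dish_antenna' to the
# WordNet synset 'dish.n.05'.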
{'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and 
out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a 
soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 
431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a 
transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 
'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of 
Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 
'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 
'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a 
vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 
'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 
'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan 
skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting 
an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightblub/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a 
high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) 
from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that 
cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 
'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms': 
['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'mostly arm/knee pads labeled', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small 
boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 
'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': 
['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 
'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. 
tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 
835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a target', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for 
making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has a very sharp edge', 'name': 
'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 
'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, 
{'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 
'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharp teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms': 
['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': 
['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 
'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized 
by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp 
supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 
'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 
'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating 
temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has 
long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 
1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 
'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 
'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at 
the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 
'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa
-# fmt: on
diff --git a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/coco_evaluation.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/coco_evaluation.py
deleted file mode 100644
index aad7f5a6e79a9047e7eea623ecc761ea9655b8d6..0000000000000000000000000000000000000000
--- a/spaces/Yiqin/ChatVID/model/vision/grit_src/third_party/CenterNet2/detectron2/evaluation/coco_evaluation.py
+++ /dev/null
@@ -1,710 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import contextlib
-import copy
-import io
-import itertools
-import json
-import logging
-import numpy as np
-import os
-import pickle
-from collections import OrderedDict
-import pycocotools.mask as mask_util
-import torch
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval
-from tabulate import tabulate
-
-import detectron2.utils.comm as comm
-from detectron2.config import CfgNode
-from detectron2.data import MetadataCatalog
-from detectron2.data.datasets.coco import convert_to_coco_json
-from detectron2.evaluation.fast_eval_api import COCOeval_opt
-from detectron2.structures import Boxes, BoxMode, pairwise_iou
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import create_small_table
-
-from .evaluator import DatasetEvaluator
-
-
-class COCOEvaluator(DatasetEvaluator):
- """
- Evaluate AR for object proposals, AP for instance detection/segmentation, AP
- for keypoint detection outputs using COCO's metrics.
- See http://cocodataset.org/#detection-eval and
- http://cocodataset.org/#keypoints-eval to understand its metrics.
- The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
- the metric cannot be computed (e.g. due to no predictions made).
-
- In addition to COCO, this evaluator is able to support any bounding box detection,
- instance segmentation, or keypoint detection dataset.
- """
-
- def __init__(
- self,
- dataset_name,
- tasks=None,
- distributed=True,
- output_dir=None,
- *,
- max_dets_per_image=None,
- use_fast_impl=True,
- kpt_oks_sigmas=(),
- ):
- """
- Args:
- dataset_name (str): name of the dataset to be evaluated.
- It must have either the following corresponding metadata:
-
- "json_file": the path to the COCO format annotation
-
- Or it must be in detectron2's standard dataset format
- so it can be converted to COCO format automatically.
- tasks (tuple[str]): tasks that can be evaluated under the given
- configuration. A task is one of "bbox", "segm", "keypoints".
- By default, will infer this automatically from predictions.
- distributed (bool): if True, will collect results from all ranks and run evaluation
- in the main process.
- Otherwise, will only evaluate the results in the current process.
- output_dir (str): optional, an output directory to dump all
- results predicted on the dataset. The dump contains two files:
-
- 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
- contains all the results in the format they are produced by the model.
- 2. "coco_instances_results.json" a json file in COCO's result format.
- max_dets_per_image (int): limit on the maximum number of detections per image.
- By default in COCO, this limit is 100, but it can be customized
- to be greater, as is needed for the AP fixed and AP pool evaluation metrics
- (see https://arxiv.org/pdf/2102.01066.pdf).
- This doesn't affect keypoint evaluation.
- use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
- Although the results should be very close to the official implementation in COCO
- API, it is still recommended to compute results with the official API for use in
- papers. The faster implementation also uses more RAM.
- kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
- See http://cocodataset.org/#keypoints-eval
- When empty, it will use the defaults in COCO.
- Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
- """
- self._logger = logging.getLogger(__name__)
- self._distributed = distributed
- self._output_dir = output_dir
- self._use_fast_impl = use_fast_impl
-
- # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
- # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
- # 3rd element (100) is used as the limit on the number of detections per image when
- # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
- # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
- if max_dets_per_image is None:
- max_dets_per_image = [1, 10, 100]
- else:
- max_dets_per_image = [1, 10, max_dets_per_image]
- self._max_dets_per_image = max_dets_per_image
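- # For example, max_dets_per_image=300 yields maxDets=[1, 10, 300], so AP is
- # then computed with up to 300 detections per image instead of the COCO
- # default of 100 (the value 300 is illustrative).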
-
- if tasks is not None and isinstance(tasks, CfgNode):
- kpt_oks_sigmas = (
- tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
- )
- self._logger.warning(
- "COCO Evaluator instantiated using config; this is deprecated behavior."
- " Please pass in explicit arguments instead."
- )
- self._tasks = None # Inferring it from predictions should be better
- else:
- self._tasks = tasks
-
- self._cpu_device = torch.device("cpu")
-
- self._metadata = MetadataCatalog.get(dataset_name)
- if not hasattr(self._metadata, "json_file"):
- if output_dir is None:
- raise ValueError(
- "output_dir must be provided to COCOEvaluator "
- "for datasets not in COCO format."
- )
- self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
-
- cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
- self._metadata.json_file = cache_path
- convert_to_coco_json(dataset_name, cache_path)
-
- json_file = PathManager.get_local_path(self._metadata.json_file)
- with contextlib.redirect_stdout(io.StringIO()):
- self._coco_api = COCO(json_file)
-
- # Test set json files do not contain annotations (evaluation must be
- # performed using the COCO evaluation server).
- self._do_evaluation = "annotations" in self._coco_api.dataset
- if self._do_evaluation:
- self._kpt_oks_sigmas = kpt_oks_sigmas
-
- def reset(self):
- self._predictions = []
-
- def process(self, inputs, outputs):
- """
- Args:
- inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
- It is a list of dict. Each dict corresponds to an image and
- contains keys like "height", "width", "file_name", "image_id".
- outputs: the outputs of a COCO model. It is a list of dicts with key
- "instances" that contains :class:`Instances`.
- """
- for input, output in zip(inputs, outputs):
- prediction = {"image_id": input["image_id"]}
-
- if "instances" in output:
- instances = output["instances"].to(self._cpu_device)
- prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
- if "proposals" in output:
- prediction["proposals"] = output["proposals"].to(self._cpu_device)
- if len(prediction) > 1:
- self._predictions.append(prediction)
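-
- # Shape of one (input, output) pair consumed above, as a hedged sketch with
- # made-up values:
- #   input  = {"image_id": 42, "height": 480, "width": 640, "file_name": "x.jpg"}
- #   output = {"instances": Instances(...)}  # with pred_boxes, scores, pred_classes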
-
- def evaluate(self, img_ids=None):
- """
- Args:
- img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
- """
- if self._distributed:
- comm.synchronize()
- predictions = comm.gather(self._predictions, dst=0)
- predictions = list(itertools.chain(*predictions))
-
- if not comm.is_main_process():
- return {}
- else:
- predictions = self._predictions
-
- if len(predictions) == 0:
- self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
- return {}
-
- if self._output_dir:
- PathManager.mkdirs(self._output_dir)
- file_path = os.path.join(self._output_dir, "instances_predictions.pth")
- with PathManager.open(file_path, "wb") as f:
- torch.save(predictions, f)
-
- self._results = OrderedDict()
- if "proposals" in predictions[0]:
- self._eval_box_proposals(predictions)
- if "instances" in predictions[0]:
- self._eval_predictions(predictions, img_ids=img_ids)
- # Copy so the caller can do whatever with results
- return copy.deepcopy(self._results)
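-
- # The returned OrderedDict maps task names to metric dicts; an illustrative
- # (made-up) example: {"bbox": {"AP": 39.4, "AP50": 58.1, ...}, "segm": {...}}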
-
- def _tasks_from_predictions(self, predictions):
- """
- Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
- """
- tasks = {"bbox"}
- for pred in predictions:
- if "segmentation" in pred:
- tasks.add("segm")
- if "keypoints" in pred:
- tasks.add("keypoints")
- return sorted(tasks)
-
- def _eval_predictions(self, predictions, img_ids=None):
- """
- Evaluate predictions. Fill self._results with the metrics of the tasks.
- """
- self._logger.info("Preparing results for COCO format ...")
- coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
- tasks = self._tasks or self._tasks_from_predictions(coco_results)
-
- # unmap the category ids for COCO
- if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
- dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
- all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
- num_classes = len(all_contiguous_ids)
- assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
-
- reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
- for result in coco_results:
- category_id = result["category_id"]
- assert category_id < num_classes, (
- f"A prediction has class={category_id}, "
- f"but the dataset only has {num_classes} classes and "
- f"predicted class id should be in [0, {num_classes - 1}]."
- )
- result["category_id"] = reverse_id_mapping[category_id]
-
- if self._output_dir:
- file_path = os.path.join(self._output_dir, "coco_instances_results.json")
- self._logger.info("Saving results to {}".format(file_path))
- with PathManager.open(file_path, "w") as f:
- f.write(json.dumps(coco_results))
- f.flush()
-
- if not self._do_evaluation:
- self._logger.info("Annotations are not available for evaluation.")
- return
-
- self._logger.info(
- "Evaluating predictions with {} COCO API...".format(
- "unofficial" if self._use_fast_impl else "official"
- )
- )
- for task in sorted(tasks):
- assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
- coco_eval = (
- _evaluate_predictions_on_coco(
- self._coco_api,
- coco_results,
- task,
- kpt_oks_sigmas=self._kpt_oks_sigmas,
- use_fast_impl=self._use_fast_impl,
- img_ids=img_ids,
- max_dets_per_image=self._max_dets_per_image,
- )
- if len(coco_results) > 0
- else None # cocoapi does not handle empty results very well
- )
-
- res = self._derive_coco_results(
- coco_eval, task, class_names=self._metadata.get("thing_classes")
- )
- self._results[task] = res
-
- def _eval_box_proposals(self, predictions):
- """
- Evaluate the box proposals in predictions.
- Fill self._results with the metrics for "box_proposals" task.
- """
- if self._output_dir:
- # Saving generated box proposals to file.
- # Predicted box_proposals are in XYXY_ABS mode.
- bbox_mode = BoxMode.XYXY_ABS.value
- ids, boxes, objectness_logits = [], [], []
- for prediction in predictions:
- ids.append(prediction["image_id"])
- boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
- objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
-
- proposal_data = {
- "boxes": boxes,
- "objectness_logits": objectness_logits,
- "ids": ids,
- "bbox_mode": bbox_mode,
- }
- with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
- pickle.dump(proposal_data, f)
-
- if not self._do_evaluation:
- self._logger.info("Annotations are not available for evaluation.")
- return
-
- self._logger.info("Evaluating bbox proposals ...")
- res = {}
- areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
- for limit in [100, 1000]:
- for area, suffix in areas.items():
- stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
- key = "AR{}@{:d}".format(suffix, limit)
- res[key] = float(stats["ar"].item() * 100)
- self._logger.info("Proposal metrics: \n" + create_small_table(res))
- self._results["box_proposals"] = res
-
- def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
- """
- Derive the desired score numbers from summarized COCOeval.
-
- Args:
- coco_eval (None or COCOEval): None represents no predictions from model.
- iou_type (str):
- class_names (None or list[str]): if provided, will use it to compute
- per-category AP.
-
- Returns:
- a dict of {metric name: score}
- """
-
- metrics = {
- "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
- "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
- "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
- }[iou_type]
-
- if coco_eval is None:
- self._logger.warn("No predictions from the model!")
- return {metric: float("nan") for metric in metrics}
-
- # the standard metrics
- results = {
- metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
- for idx, metric in enumerate(metrics)
- }
- self._logger.info(
- "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
- )
- if not np.isfinite(sum(results.values())):
- self._logger.info("Some metrics cannot be computed and is shown as NaN.")
-
- if class_names is None or len(class_names) <= 1:
- return results
- # Compute per-category AP
- # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
- precisions = coco_eval.eval["precision"]
- # precision has dims (iou, recall, cls, area range, max dets)
- assert len(class_names) == precisions.shape[2]
-
- results_per_category = []
- for idx, name in enumerate(class_names):
- # area range index 0: all area ranges
- # max dets index -1: typically 100 per image
- precision = precisions[:, :, idx, 0, -1]
- precision = precision[precision > -1]
- ap = np.mean(precision) if precision.size else float("nan")
- results_per_category.append(("{}".format(name), float(ap * 100)))
-
- # tabulate it
- N_COLS = min(6, len(results_per_category) * 2)
- results_flatten = list(itertools.chain(*results_per_category))
- results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
- table = tabulate(
- results_2d,
- tablefmt="pipe",
- floatfmt=".3f",
- headers=["category", "AP"] * (N_COLS // 2),
- numalign="left",
- )
- self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
-
- results.update({"AP-" + name: ap for name, ap in results_per_category})
- return results
-
-
-def instances_to_coco_json(instances, img_id):
- """
- Dump an "Instances" object to a COCO-format json that's used for evaluation.
-
- Args:
- instances (Instances):
- img_id (int): the image id
-
- Returns:
- list[dict]: list of json annotations in COCO format.
- """
- num_instance = len(instances)
- if num_instance == 0:
- return []
-
- boxes = instances.pred_boxes.tensor.numpy()
- boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
- boxes = boxes.tolist()
- scores = instances.scores.tolist()
- classes = instances.pred_classes.tolist()
-
- has_mask = instances.has("pred_masks")
- if has_mask:
- # use RLE to encode the masks, because they are too large and take too much memory
- # since this evaluator stores outputs of the entire dataset
- rles = [
- mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
- for mask in instances.pred_masks
- ]
- for rle in rles:
- # "counts" is an array encoded by mask_util as a byte-stream. Python3's
- # json writer which always produces strings cannot serialize a bytestream
- # unless you decode it. Thankfully, utf-8 works out (which is also what
- # the pycocotools/_mask.pyx does).
- rle["counts"] = rle["counts"].decode("utf-8")
-
- has_keypoints = instances.has("pred_keypoints")
- if has_keypoints:
- keypoints = instances.pred_keypoints
-
- results = []
- for k in range(num_instance):
- result = {
- "image_id": img_id,
- "category_id": classes[k],
- "bbox": boxes[k],
- "score": scores[k],
- }
- if has_mask:
- result["segmentation"] = rles[k]
- if has_keypoints:
- # In COCO annotations,
- # keypoints coordinates are pixel indices.
- # However our predictions are floating point coordinates.
- # Therefore we subtract 0.5 to be consistent with the annotation format.
- # This is the inverse of data loading logic in `datasets/coco.py`.
- keypoints[k][:, :2] -= 0.5
- result["keypoints"] = keypoints[k].flatten().tolist()
- results.append(result)
- return results
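The utf-8 decode in the mask branch above is load-bearing; here is a small self-contained sketch (assuming pycocotools is installed) of why json.dumps needs it:

import json
import numpy as np
import pycocotools.mask as mask_util

mask = np.zeros((4, 4, 1), order="F", dtype="uint8")
mask[1:3, 1:3, 0] = 1
rle = mask_util.encode(mask)[0]            # {"size": [4, 4], "counts": b"..."}
# json.dumps(rle) would raise TypeError here because rle["counts"] is bytes.
rle["counts"] = rle["counts"].decode("utf-8")
print(json.dumps(rle))                     # now serializable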
-
-
-# inspired from Detectron:
-# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
-def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
- """
- Evaluate detection proposal recall metrics. This function is a much
- faster alternative to the official COCO API recall evaluation code. However,
- it produces slightly different results.
- """
- # Record max overlap value for each gt box
- # Return vector of overlap values
- areas = {
- "all": 0,
- "small": 1,
- "medium": 2,
- "large": 3,
- "96-128": 4,
- "128-256": 5,
- "256-512": 6,
- "512-inf": 7,
- }
- area_ranges = [
- [0 ** 2, 1e5 ** 2], # all
- [0 ** 2, 32 ** 2], # small
- [32 ** 2, 96 ** 2], # medium
- [96 ** 2, 1e5 ** 2], # large
- [96 ** 2, 128 ** 2], # 96-128
- [128 ** 2, 256 ** 2], # 128-256
- [256 ** 2, 512 ** 2], # 256-512
- [512 ** 2, 1e5 ** 2],
- ] # 512-inf
- assert area in areas, "Unknown area range: {}".format(area)
- area_range = area_ranges[areas[area]]
- gt_overlaps = []
- num_pos = 0
-
- for prediction_dict in dataset_predictions:
- predictions = prediction_dict["proposals"]
-
- # sort predictions in descending order
- # TODO maybe remove this and make it explicit in the documentation
- inds = predictions.objectness_logits.sort(descending=True)[1]
- predictions = predictions[inds]
-
- ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
- anno = coco_api.loadAnns(ann_ids)
- gt_boxes = [
- BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
- for obj in anno
- if obj["iscrowd"] == 0
- ]
- gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
- gt_boxes = Boxes(gt_boxes)
- gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
-
- if len(gt_boxes) == 0 or len(predictions) == 0:
- continue
-
- valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
- gt_boxes = gt_boxes[valid_gt_inds]
-
- num_pos += len(gt_boxes)
-
- if len(gt_boxes) == 0:
- continue
-
- if limit is not None and len(predictions) > limit:
- predictions = predictions[:limit]
-
- overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
-
- _gt_overlaps = torch.zeros(len(gt_boxes))
- for j in range(min(len(predictions), len(gt_boxes))):
- # find which proposal box maximally covers each gt box
- # and get the iou amount of coverage for each gt box
- max_overlaps, argmax_overlaps = overlaps.max(dim=0)
-
- # find which gt box is 'best' covered (i.e. 'best' = most iou)
- gt_ovr, gt_ind = max_overlaps.max(dim=0)
- assert gt_ovr >= 0
- # find the proposal box that covers the best covered gt box
- box_ind = argmax_overlaps[gt_ind]
- # record the iou coverage of this gt box
- _gt_overlaps[j] = overlaps[box_ind, gt_ind]
- assert _gt_overlaps[j] == gt_ovr
- # mark the proposal box and the gt box as used
- overlaps[box_ind, :] = -1
- overlaps[:, gt_ind] = -1
-
- # append recorded iou coverage level
- gt_overlaps.append(_gt_overlaps)
- gt_overlaps = (
- torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
- )
- gt_overlaps, _ = torch.sort(gt_overlaps)
-
- if thresholds is None:
- step = 0.05
- thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
- recalls = torch.zeros_like(thresholds)
- # compute recall for each iou threshold
- for i, t in enumerate(thresholds):
- recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
- # ar = 2 * np.trapz(recalls, thresholds)
- ar = recalls.mean()
- return {
- "ar": ar,
- "recalls": recalls,
- "thresholds": thresholds,
- "gt_overlaps": gt_overlaps,
- "num_pos": num_pos,
- }
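To make the tail of this function concrete, a tiny sketch of the AR computation with made-up overlaps; the mean over IoU thresholds 0.50:0.05:0.95 approximates the area under the recall-threshold curve from the commented-out trapz line:

import torch

gt_overlaps = torch.tensor([0.40, 0.55, 0.62, 0.80, 0.91])   # made-up best IoU per gt box
num_pos = len(gt_overlaps)
thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05)
recalls = torch.stack([(gt_overlaps >= t).float().sum() / num_pos for t in thresholds])
print(recalls)         # step function: 0.8, 0.8, 0.6, ... down to 0.0
print(recalls.mean())  # the AR value returned above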
-
-
-def _evaluate_predictions_on_coco(
- coco_gt,
- coco_results,
- iou_type,
- kpt_oks_sigmas=None,
- use_fast_impl=True,
- img_ids=None,
- max_dets_per_image=None,
-):
- """
- Evaluate the coco results using COCOEval API.
- """
- assert len(coco_results) > 0
-
- if iou_type == "segm":
- coco_results = copy.deepcopy(coco_results)
- # When evaluating mask AP, if the results contain bbox, cocoapi will
- # use the box area as the area of the instance, instead of the mask area.
- # This leads to a different definition of small/medium/large.
- # We remove the bbox field to let mask AP use mask area.
- for c in coco_results:
- c.pop("bbox", None)
-
- coco_dt = coco_gt.loadRes(coco_results)
- coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
- # For COCO, the default max_dets_per_image is [1, 10, 100].
- if max_dets_per_image is None:
- max_dets_per_image = [1, 10, 100] # Default from COCOEval
- else:
- assert (
- len(max_dets_per_image) >= 3
- ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
- # In the case that user supplies a custom input for max_dets_per_image,
- # apply COCOevalMaxDets to evaluate AP with the custom input.
- if max_dets_per_image[2] != 100:
- coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
- if iou_type != "keypoints":
- coco_eval.params.maxDets = max_dets_per_image
-
- if img_ids is not None:
- coco_eval.params.imgIds = img_ids
-
- if iou_type == "keypoints":
- # Use the COCO default keypoint OKS sigmas unless overrides are specified
- if kpt_oks_sigmas:
- assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
- coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
- # COCOAPI requires every detection and every gt to have keypoints, so
- # we just take the first entry from both
- num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
- num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
- num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
- assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
- f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
- f"Ground truth contains {num_keypoints_gt} keypoints. "
- f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
- "They have to agree with each other. For meaning of OKS, please refer to "
- "http://cocodataset.org/#keypoints-eval."
- )
-
- coco_eval.evaluate()
- coco_eval.accumulate()
- coco_eval.summarize()
-
- return coco_eval
-
-
-class COCOevalMaxDets(COCOeval):
- """
- Modified version of COCOeval for evaluating AP with a custom
- maxDets (by default for COCO, maxDets is 100)
- """
-
- def summarize(self):
- """
- Compute and display summary metrics for evaluation results given
- a custom value for max_dets_per_image
- """
-
- def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
- p = self.params
- iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
- titleStr = "Average Precision" if ap == 1 else "Average Recall"
- typeStr = "(AP)" if ap == 1 else "(AR)"
- iouStr = (
- "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
- if iouThr is None
- else "{:0.2f}".format(iouThr)
- )
-
- aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
- mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
- if ap == 1:
- # dimension of precision: [TxRxKxAxM]
- s = self.eval["precision"]
- # IoU
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:, :, :, aind, mind]
- else:
- # dimension of recall: [TxKxAxM]
- s = self.eval["recall"]
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:, :, aind, mind]
- if len(s[s > -1]) == 0:
- mean_s = -1
- else:
- mean_s = np.mean(s[s > -1])
- print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
- return mean_s
-
- def _summarizeDets():
- stats = np.zeros((12,))
- # Evaluate AP using the custom limit on maximum detections per image
- stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
- stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
- stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
- stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
- stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
- stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
- stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
- stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
- stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
- stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
- stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
- stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
- return stats
-
- def _summarizeKps():
- stats = np.zeros((10,))
- stats[0] = _summarize(1, maxDets=20)
- stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
- stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
- stats[3] = _summarize(1, maxDets=20, areaRng="medium")
- stats[4] = _summarize(1, maxDets=20, areaRng="large")
- stats[5] = _summarize(0, maxDets=20)
- stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
- stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
- stats[8] = _summarize(0, maxDets=20, areaRng="medium")
- stats[9] = _summarize(0, maxDets=20, areaRng="large")
- return stats
-
- if not self.eval:
- raise Exception("Please run accumulate() first")
- iouType = self.params.iouType
- if iouType == "segm" or iouType == "bbox":
- summarize = _summarizeDets
- elif iouType == "keypoints":
- summarize = _summarizeKps
- self.stats = summarize()
-
- def __str__(self):
- self.summarize()
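A hedged usage sketch for the class above (the file paths are hypothetical placeholders): swap COCOevalMaxDets in when the 100-detections-per-image cap should be raised.

from pycocotools.coco import COCO

coco_gt = COCO("annotations/instances_val.json")           # hypothetical path
coco_dt = coco_gt.loadRes("coco_instances_results.json")   # file written by evaluate()
coco_eval = COCOevalMaxDets(coco_gt, coco_dt, "bbox")
coco_eval.params.maxDets = [1, 10, 300]                    # custom per-image limit
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()                                      # AP/AR reported at maxDets=300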
diff --git a/spaces/YotamNitzan/domain-expansion/torch_utils/ops/bias_act.cpp b/spaces/YotamNitzan/domain-expansion/torch_utils/ops/bias_act.cpp
deleted file mode 100644
index 5d2425d8054991a8e8b6f7a940fd0ff7fa0bb330..0000000000000000000000000000000000000000
--- a/spaces/YotamNitzan/domain-expansion/torch_utils/ops/bias_act.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <torch/extension.h>
-#include <ATen/cuda/CUDAContext.h>
-#include <c10/cuda/CUDAGuard.h>
-#include "bias_act.h"
-
-//------------------------------------------------------------------------
-
-static bool has_same_layout(torch::Tensor x, torch::Tensor y)
-{
- if (x.dim() != y.dim())
- return false;
- for (int64_t i = 0; i < x.dim(); i++)
- {
- if (x.size(i) != y.size(i))
- return false;
- if (x.size(i) >= 2 && x.stride(i) != y.stride(i))
- return false;
- }
- return true;
-}
-
-//------------------------------------------------------------------------
-
-static torch::Tensor bias_act(torch::Tensor x, torch::Tensor b, torch::Tensor xref, torch::Tensor yref, torch::Tensor dy, int grad, int dim, int act, float alpha, float gain, float clamp)
-{
- // Validate arguments.
- TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
- TORCH_CHECK(b.numel() == 0 || (b.dtype() == x.dtype() && b.device() == x.device()), "b must have the same dtype and device as x");
- TORCH_CHECK(xref.numel() == 0 || (xref.sizes() == x.sizes() && xref.dtype() == x.dtype() && xref.device() == x.device()), "xref must have the same shape, dtype, and device as x");
- TORCH_CHECK(yref.numel() == 0 || (yref.sizes() == x.sizes() && yref.dtype() == x.dtype() && yref.device() == x.device()), "yref must have the same shape, dtype, and device as x");
- TORCH_CHECK(dy.numel() == 0 || (dy.sizes() == x.sizes() && dy.dtype() == x.dtype() && dy.device() == x.device()), "dy must have the same shape, dtype, and device as x");
- TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
- TORCH_CHECK(b.dim() == 1, "b must have rank 1");
- TORCH_CHECK(b.numel() == 0 || (dim >= 0 && dim < x.dim()), "dim is out of bounds");
- TORCH_CHECK(b.numel() == 0 || b.numel() == x.size(dim), "b has wrong number of elements");
- TORCH_CHECK(grad >= 0, "grad must be non-negative");
-
- // Validate layout.
- TORCH_CHECK(x.is_non_overlapping_and_dense(), "x must be non-overlapping and dense");
- TORCH_CHECK(b.is_contiguous(), "b must be contiguous");
- TORCH_CHECK(xref.numel() == 0 || has_same_layout(xref, x), "xref must have the same layout as x");
- TORCH_CHECK(yref.numel() == 0 || has_same_layout(yref, x), "yref must have the same layout as x");
- TORCH_CHECK(dy.numel() == 0 || has_same_layout(dy, x), "dy must have the same layout as x");
-
- // Create output tensor.
- const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
- torch::Tensor y = torch::empty_like(x);
- TORCH_CHECK(has_same_layout(y, x), "y must have the same layout as x");
-
- // Initialize CUDA kernel parameters.
- bias_act_kernel_params p;
- p.x = x.data_ptr();
- p.b = (b.numel()) ? b.data_ptr() : NULL;
- p.xref = (xref.numel()) ? xref.data_ptr() : NULL;
- p.yref = (yref.numel()) ? yref.data_ptr() : NULL;
- p.dy = (dy.numel()) ? dy.data_ptr() : NULL;
- p.y = y.data_ptr();
- p.grad = grad;
- p.act = act;
- p.alpha = alpha;
- p.gain = gain;
- p.clamp = clamp;
- p.sizeX = (int)x.numel();
- p.sizeB = (int)b.numel();
- p.stepB = (b.numel()) ? (int)x.stride(dim) : 1;
-
- // Choose CUDA kernel.
- void* kernel;
- AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&]
- {
- kernel = choose_bias_act_kernel(p);
- });
- TORCH_CHECK(kernel, "no CUDA kernel found for the specified activation func");
-
- // Launch CUDA kernel.
- p.loopX = 4;
- int blockSize = 4 * 32;
- int gridSize = (p.sizeX - 1) / (p.loopX * blockSize) + 1;
- void* args[] = {&p};
- AT_CUDA_CHECK(cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, at::cuda::getCurrentCUDAStream()));
- return y;
-}
-
-//------------------------------------------------------------------------
-
-PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
-{
- m.def("bias_act", &bias_act);
-}
-
-//------------------------------------------------------------------------
diff --git a/spaces/ZachNagengast/vid2grid/app.py b/spaces/ZachNagengast/vid2grid/app.py
deleted file mode 100644
index 7a6d2063ba918ea840003d16b6aa54a52b9f834a..0000000000000000000000000000000000000000
--- a/spaces/ZachNagengast/vid2grid/app.py
+++ /dev/null
@@ -1,248 +0,0 @@
-import gradio as gr
-from PIL import Image, ImageDraw, ImageFont, ImageSequence
-import numpy as np
-import cv2
-import os
-import tempfile
-
-stored_frames = None  # module-level cache of the most recently loaded frames
-
-def load_and_store_frames(image_file, grid_x, grid_y):
- global stored_frames
-
- try:
- # Make sure file exists
- if image_file is None:
- return "File not found", ""
-
- print(f"Loading frames for {image_file.name}")
-
- if image_file.name.endswith('.mp4'):
- frames = extract_frames_from_video(image_file.name)
- video_path = image_file.name
- else: # it's a gif
- try:
- img = Image.open(image_file.name)
- except Exception as e:
- print(f"Could not open GIF file: {e}")
- return "Could not open GIF file", ""
-
- frames = []
- for i in range(0, img.n_frames):
- try:
- img.seek(i)
- frames.append(img.copy())
- except Exception as e:
- print(f"Could not seek to frame {i}: {e}")
-
- # Convert GIF to MP4 for preview
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
- tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
- video_path = tmp_file.name
-
- try:
- duration = img.info.get('duration', 100)
- # default to reasonable framerate if duration is 0
- framerate = 1 / (duration / 1000.0) if duration > 0 else 10
- print(f"frame count: {len(frames)} framerate: {duration} {img.info}")
- convert_gif_to_video(image_file.name, tmp_file.name, framerate)
- except Exception as e:
- print(f"Could not convert GIF to MP4: {e}")
-
- stored_frames = frames # Store the frames for later use
- total_frames = len(frames)
- selected_frames_count = grid_x * grid_y
- details = f"**Total Frames:** {len(frames)}\n\n"
- output_info = f"Grid size: {grid_x} x {grid_y}\n\nSelected Frames: {selected_frames_count} / {total_frames} ({selected_frames_count / total_frames * 100:.2f}%)"
- return f"Frames loaded successfully\n\n{details}\n\n{output_info}", video_path
- except Exception as e:
- print(f"An error occurred while loading and storing frames: {e}")
- return f"An error occurred: {e}", ""
-
-def generate_grid(grid_x, grid_y, font_size, font_color, position, border_size, border_color):
- global stored_frames
- # print(f"Processing grid with {grid_x} x {grid_y} grid size, font size {font_size}, font color {font_color}, position {position}, border size {border_size}, border color {border_color}")
-
- if stored_frames is None:
- # load_and_store_frames() needs the uploaded file and grid sizes, so it
- # cannot be re-run from here; fail gracefully instead.
- return None, "No frames loaded yet. Upload a GIF or MP4 first."
-
- grid_img, output_info = create_grid(stored_frames, grid_x, grid_y, font_size, font_color, position, border_size, border_color)
- details = f"Total Frames: {len(stored_frames)}\n\n{output_info}"
- return grid_img, details
-
-def create_grid(frames, grid_x, grid_y, font_size, font_color, position, border_size, border_color):
- total_frames = len(frames)
- selected_frames_count = grid_x * grid_y
-
- # Select evenly spaced frames
- selected_frames_indices = np.linspace(0, total_frames - 1, selected_frames_count).astype(int)
- selected_frames = [frames[i] for i in selected_frames_indices]
-
- # Modify frames by adding border and number
- modified_frames = []
- try:
- font = ImageFont.truetype("Lato-Regular.ttf", font_size)
- except IOError:
- print("Font not found, using default font.")
- font = ImageFont.load_default()
-
- positions = {
- "Top Left": (20, 20),
- "Top Right": (frames[0].width - 20 - font_size, 20),
- "Bottom Left": (20, frames[0].height - 20 - font_size),
- "Bottom Right": (frames[0].width - 20 - font_size, frames[0].height - 20 - font_size)
- }
-
- for i, frame in enumerate(selected_frames):
- # Add border
- border_width = border_size
- frame_with_border = Image.new('RGB', (frame.width + 2*border_width, frame.height + 2*border_width), border_color.lower())
- frame_with_border.paste(frame, (border_width, border_width))
-
- # Add number
- draw = ImageDraw.Draw(frame_with_border)
- text = str(i + 1)
- text_position = (border_width + positions[position][0], border_width + positions[position][1])
- draw.text(text_position, text, font=font, fill=font_color)
-
- modified_frames.append(frame_with_border)
-
- # Combine modified frames into a grid
- grid_width = modified_frames[0].width * grid_x
- grid_height = modified_frames[0].height * grid_y
- grid_img = Image.new('RGB', (grid_width, grid_height), border_color.lower())
- for i, frame in enumerate(modified_frames):
- x_offset = (i % grid_x) * frame.width
- y_offset = (i // grid_x) * frame.height
- grid_img.paste(frame, (x_offset, y_offset))
-
- output_info = f"Grid size: {grid_x} x {grid_y}\n\nSelected Frames: {selected_frames_count} / {total_frames} ({selected_frames_count / total_frames * 100:.2f}%)"
- return grid_img, output_info
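The frame sampling above reduces to one np.linspace call; a quick standalone check of what it selects:

import numpy as np

# Pick grid_x * grid_y indices spread evenly across the clip, endpoints included.
total_frames, grid_x, grid_y = 120, 3, 3
indices = np.linspace(0, total_frames - 1, grid_x * grid_y).astype(int)
print(indices)  # [  0  14  29  44  59  74  89 104 119]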
-
-def extract_frames_from_video(video_file):
- """Extract frames from an MP4 video."""
- frames = []
- cap = cv2.VideoCapture(video_file)
- while True:
- ret, frame = cap.read()
- if not ret:
- break
- # Convert BGR format (used by OpenCV) to RGB
- frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- frames.append(Image.fromarray(frame_rgb))
- cap.release()
- return frames
-
-
-def convert_gif_to_video(gif_path, output_video_path, frame_rate):
- try:
- # Load the gif
- gif = Image.open(gif_path)
- except Exception as e:
- print(f"Could not open GIF file: {e}")
- return
-
- try:
- # Define the codec and create VideoWriter object
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
- out = cv2.VideoWriter(output_video_path, fourcc, frame_rate, (gif.width, gif.height))
- except Exception as e:
- print(f"Could not create VideoWriter object: {e}")
- return
-
- try:
- # Iterate over the frames of the gif
- for frame_index in range(gif.n_frames):
- gif.seek(frame_index)
- # Convert the PIL Image to an array
- frame_arr = np.array(gif.convert("RGB"))
- # Convert RGB to BGR format
- frame_bgr = cv2.cvtColor(frame_arr, cv2.COLOR_RGB2BGR)
- # Write the frame to the video
- out.write(frame_bgr)
- except Exception as e:
- print(f"Could not write frame to video: {e}")
-
- out.release()
-
-def gif_or_video_info(image_file, grid_x, grid_y, font_size, font_color, position, border_size, border_color):
- image_file.file.seek(0)
- video_path = ""
-
- if image_file.name.endswith('.mp4'):
- video_path = image_file.name
- cap = cv2.VideoCapture(image_file.name)
- frame_rate = cap.get(cv2.CAP_PROP_FPS) # Get the actual frame rate of the video
- frames = extract_frames_from_video(image_file.name)
- total_frames = len(frames)
- cap.release()
- else: # it's a gif
- img = Image.open(image_file.name)
- frames = []
- for i in range(0, img.n_frames):
- img.seek(i)
- frames.append(img.copy())
-
- total_frames = img.n_frames
- frame_rate = 1 / (img.info.get('duration', 100) / 1000.0) # duration is ms per frame; convert to frames/sec
-
- # Convert GIF to MP4 and save it to a temp path
- fourcc = cv2.VideoWriter_fourcc(*'mp4v')
- tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
- video_path = tmp_file.name
- convert_gif_to_video(image_file.name, tmp_file.name, frame_rate)
-
- grid_img, output_info = create_grid(frames, grid_x, grid_y, font_size, font_color, position, border_size, border_color)
- details = f"**Total Frames:** {total_frames}\n\n**Frame Rate:** {frame_rate} frames/sec\n\n{output_info}"
-
- return grid_img, details, video_path
-
-def gif_info(image_file, grid_x, grid_y, font_size, font_color, position, border_size, border_color):
- return gif_or_video_info(image_file, grid_x, grid_y, font_size, font_color, position, border_size, border_color)
-
-def mirror(x):
- return x
-
-with gr.Blocks() as app:
- gr.Markdown('## vid2grid Generator')
- gr.Markdown('Upload a GIF or MP4 to generate a grid from its frames. Use the sliders to adjust the grid size and text settings.\n\nThis is particularly useful with multimodal models such as GPT-4V for retrieving descriptions of short videos or GIFs, [example here.](https://twitter.com/zachnagengast/status/1712896232170180651)\n\n **Note:** The grid will be generated only after clicking the "Generate Grid" button.')
- with gr.Row():
- with gr.Column():
- control_image = gr.File(label="Upload a short MP4 or GIF", type="file", elem_id="file_upload", file_types=[".gif", ".mp4"])
- video_preview = gr.Video(interactive=False, label="Preview", format="mp4")
- gif_details = gr.Markdown("No file found.")
- # gr.Examples(
- # examples=[os.path.join(os.path.dirname(__file__), "demo.mp4")],
- # inputs=[control_image],
- # outputs=[gif_details, video_preview],
- # fn=load_and_store_frames,
- # cache_examples=True,
- # )
- process_button = gr.Button("Generate Grid") # New button to trigger the heavy computation
- grid_x_slider = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Grid X Size")
- grid_y_slider = gr.Slider(minimum=1, maximum=10, step=1, value=3, label="Grid Y Size")
- font_color_dropdown = gr.Dropdown(choices=["Black", "White", "Red", "Green", "Blue"], value="White", label="Numbering Color")
- position_radio = gr.Radio(choices=["Top Left", "Top Right", "Bottom Left", "Bottom Right"], value="Top Left", label="Numbering Position")
- font_size_slider = gr.Slider(minimum=10, maximum=100, step=5, value=40, label="Font Size")
- border_color_dropdown = gr.Dropdown(choices=["Black", "White", "Red", "Green", "Blue"], value="White", label="Border Color")
- border_size_slider = gr.Slider(minimum=0, maximum=100, step=5, value=10, label="Border Size")
- with gr.Column():
- result_image = gr.Image(label="Generated Grid", value="https://i.imgur.com/fYrBwbd.png")
-
- # Use .change() method to listen for changes in any of the controls
- control_image.upload(load_and_store_frames, inputs=[control_image, grid_x_slider, grid_y_slider], outputs=[gif_details, video_preview])
-
- # grid_x_slider.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details, video_preview])
- # grid_y_slider.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
- # font_size_slider.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
- # font_color_dropdown.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
- # position_radio.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
- # border_size_slider.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
- # border_color_dropdown.change(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
-
- process_button.click(generate_grid, inputs=[grid_x_slider, grid_y_slider, font_size_slider, font_color_dropdown, position_radio, border_size_slider, border_color_dropdown], outputs=[result_image, gif_details])
-
-if __name__ == "__main__":
- stored_frames = None
- app.launch()
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/__init__.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/__init__.py
deleted file mode 100644
index a3537297f57e4c3670afdb97b5fcb1b2d775e5f3..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/core/bbox/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
- MaxIoUAssigner, RegionAssigner)
-from .builder import build_assigner, build_bbox_coder, build_sampler
-from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
- TBLRBBoxCoder)
-from .iou_calculators import BboxOverlaps2D, bbox_overlaps
-from .samplers import (BaseSampler, CombinedSampler,
- InstanceBalancedPosSampler, IoUBalancedNegSampler,
- OHEMSampler, PseudoSampler, RandomSampler,
- SamplingResult, ScoreHLRSampler)
-from .transforms import (bbox2distance, bbox2result, bbox2roi,
- bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
- bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
- distance2bbox, roi2bbox)
-
-__all__ = [
- 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
- 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
- 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
- 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
- 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
- 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
- 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
- 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
- 'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
- 'RegionAssigner'
-]
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/samplers/distributed_sampler.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/samplers/distributed_sampler.py
deleted file mode 100644
index cc61019484655ee2829f7908dc442caa20cf1d54..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet_null/datasets/samplers/distributed_sampler.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import math
-
-import torch
-from torch.utils.data import DistributedSampler as _DistributedSampler
-
-
-class DistributedSampler(_DistributedSampler):
-
- def __init__(self,
- dataset,
- num_replicas=None,
- rank=None,
- shuffle=True,
- seed=0):
- super().__init__(
- dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
- # for compatibility with PyTorch 1.3+
- self.seed = seed if seed is not None else 0
-
- def __iter__(self):
- # deterministically shuffle based on epoch
- if self.shuffle:
- g = torch.Generator()
- g.manual_seed(self.epoch + self.seed)
- indices = torch.randperm(len(self.dataset), generator=g).tolist()
- else:
- indices = torch.arange(len(self.dataset)).tolist()
-
- # add extra samples to make it evenly divisible
- # in case that indices is shorter than half of total_size
- indices = (indices *
- math.ceil(self.total_size / len(indices)))[:self.total_size]
- assert len(indices) == self.total_size
-
- # subsample
- indices = indices[self.rank:self.total_size:self.num_replicas]
- assert len(indices) == self.num_samples
-
- return iter(indices)
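The pad-then-stride logic above is easiest to see on a toy example (a sketch, independent of torch):

import math

num_replicas, num_samples = 3, 3
total_size = num_replicas * num_samples   # 9 slots for 7 real samples
indices = list(range(7))
indices = (indices * math.ceil(total_size / len(indices)))[:total_size]
print(indices)                            # [0, 1, 2, 3, 4, 5, 6, 0, 1]
for rank in range(num_replicas):
    print(rank, indices[rank:total_size:num_replicas])
# rank 0 -> [0, 3, 6], rank 1 -> [1, 4, 0], rank 2 -> [2, 5, 1]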
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/utils/se_layer.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/utils/se_layer.py
deleted file mode 100644
index 083bd7d1ccee909c900c7aed2cc928bf14727f3e..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/models/utils/se_layer.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import annotator.uniformer.mmcv as mmcv
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from .make_divisible import make_divisible
-
-
-class SELayer(nn.Module):
- """Squeeze-and-Excitation Module.
-
- Args:
- channels (int): The input (and output) channels of the SE layer.
- ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
- ``int(channels/ratio)``. Default: 16.
- conv_cfg (None or dict): Config dict for convolution layer.
- Default: None, which means using conv2d.
- act_cfg (dict or Sequence[dict]): Config dict for activation layer.
- If act_cfg is a dict, two activation layers will be configured
- by this dict. If act_cfg is a sequence of dicts, the first
- activation layer will be configured by the first dict and the
- second activation layer will be configured by the second dict.
- Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
- divisor=6.0)).
- """
-
- def __init__(self,
- channels,
- ratio=16,
- conv_cfg=None,
- act_cfg=(dict(type='ReLU'),
- dict(type='HSigmoid', bias=3.0, divisor=6.0))):
- super(SELayer, self).__init__()
- if isinstance(act_cfg, dict):
- act_cfg = (act_cfg, act_cfg)
- assert len(act_cfg) == 2
- assert mmcv.is_tuple_of(act_cfg, dict)
- self.global_avgpool = nn.AdaptiveAvgPool2d(1)
- self.conv1 = ConvModule(
- in_channels=channels,
- out_channels=make_divisible(channels // ratio, 8),
- kernel_size=1,
- stride=1,
- conv_cfg=conv_cfg,
- act_cfg=act_cfg[0])
- self.conv2 = ConvModule(
- in_channels=make_divisible(channels // ratio, 8),
- out_channels=channels,
- kernel_size=1,
- stride=1,
- conv_cfg=conv_cfg,
- act_cfg=act_cfg[1])
-
- def forward(self, x):
- out = self.global_avgpool(x)
- out = self.conv1(out)
- out = self.conv2(out)
- return x * out
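A minimal smoke test for the layer above, assuming the vendored annotator.uniformer.mmcv package (and its ConvModule) imports cleanly:

import torch

se = SELayer(channels=64, ratio=16)   # squeeze: 64 -> make_divisible(64 // 16, 8) -> 64 channels
x = torch.randn(2, 64, 32, 32)
out = se(x)                           # global pool -> conv1 -> conv2 -> rescale
assert out.shape == x.shape           # same shape, channel-wise rescaled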
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/arraymisc/quantization.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/arraymisc/quantization.py
deleted file mode 100644
index 8e47a3545780cf071a1ef8195efb0b7b662c8186..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/arraymisc/quantization.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import numpy as np
-
-
-def quantize(arr, min_val, max_val, levels, dtype=np.int64):
- """Quantize an array of (-inf, inf) to [0, levels-1].
-
- Args:
- arr (ndarray): Input array.
- min_val (scalar): Minimum value to be clipped.
- max_val (scalar): Maximum value to be clipped.
- levels (int): Quantization levels.
- dtype (np.type): The type of the quantized array.
-
- Returns:
- ndarray: Quantized array.
- """
- if not (isinstance(levels, int) and levels > 1):
- raise ValueError(
- f'levels must be a positive integer, but got {levels}')
- if min_val >= max_val:
- raise ValueError(
- f'min_val ({min_val}) must be smaller than max_val ({max_val})')
-
- arr = np.clip(arr, min_val, max_val) - min_val
- quantized_arr = np.minimum(
- np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1)
-
- return quantized_arr
-
-
-def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
- """Dequantize an array.
-
- Args:
- arr (ndarray): Input array.
- min_val (scalar): Minimum value to be clipped.
- max_val (scalar): Maximum value to be clipped.
- levels (int): Quantization levels.
- dtype (np.type): The type of the dequantized array.
-
- Returns:
- ndarray: Dequantized array.
- """
- if not (isinstance(levels, int) and levels > 1):
- raise ValueError(
- f'levels must be a positive integer, but got {levels}')
- if min_val >= max_val:
- raise ValueError(
- f'min_val ({min_val}) must be smaller than max_val ({max_val})')
-
- dequantized_arr = (arr + 0.5).astype(dtype) * (max_val -
- min_val) / levels + min_val
-
- return dequantized_arr
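A round trip makes the two functions' contract visible: quantize clips to [min_val, max_val] and buckets into `levels` bins, and dequantize returns each bin's midpoint, so values come back within half a bin width of the clipped input.

import numpy as np

arr = np.array([-0.2, 0.13, 0.5, 0.99, 1.7])
q = quantize(arr, min_val=0.0, max_val=1.0, levels=10)
print(q)                             # [0 1 5 9 9] -- out-of-range values clipped first
print(dequantize(q, 0.0, 1.0, 10))   # [0.05 0.15 0.55 0.95 0.95] -- bin midpoints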
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/nl_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/nl_head.py
deleted file mode 100644
index 8db863e006b0a49138c94ae2a044bc57ad3e2e26..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/nl_head.py
+++ /dev/null
@@ -1,61 +0,0 @@
-'''
- * Copyright (c) 2023 Salesforce, Inc.
- * All rights reserved.
- * SPDX-License-Identifier: Apache License 2.0
- * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
- * By Can Qin
- * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
- * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
- * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv
- * Copyright (c) OpenMMLab. All rights reserved.
-'''
-
-import torch
-from annotator.uniformer.mmcv.cnn import NonLocal2d
-
-from ..builder import HEADS
-from .fcn_head import FCNHead
-
-
-@HEADS.register_module()
-class NLHead(FCNHead):
- """Non-local Neural Networks.
-
- This head is the implementation of `NLNet
- <https://arxiv.org/abs/1711.07971>`_.
-
- Args:
- reduction (int): Reduction factor of projection transform. Default: 2.
- use_scale (bool): Whether to scale pairwise_weight by
- sqrt(1/inter_channels). Default: True.
- mode (str): The nonlocal mode. Options are 'embedded_gaussian',
- 'dot_product'. Default: 'embedded_gaussian'.
- """
-
- def __init__(self,
- reduction=2,
- use_scale=True,
- mode='embedded_gaussian',
- **kwargs):
- super(NLHead, self).__init__(num_convs=2, **kwargs)
- self.reduction = reduction
- self.use_scale = use_scale
- self.mode = mode
- self.nl_block = NonLocal2d(
- in_channels=self.channels,
- reduction=self.reduction,
- use_scale=self.use_scale,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- mode=self.mode)
-
- def forward(self, inputs):
- """Forward function."""
- x = self._transform_inputs(inputs)
- output = self.convs[0](x)
- output = self.nl_block(output)
- output = self.convs[1](output)
- if self.concat_input:
- output = self.conv_cat(torch.cat([x, output], dim=1))
- output = self.cls_seg(output)
- return output
diff --git a/spaces/abnerzhang/ieltsGrade/app.py b/spaces/abnerzhang/ieltsGrade/app.py
deleted file mode 100644
index cce943084669c60eab7009090f6a9041da240742..0000000000000000000000000000000000000000
--- a/spaces/abnerzhang/ieltsGrade/app.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import gradio as gr
-def generate_essay(prompt, band_score):
- # replace this with code to generate essay text using AI
- return "Generated essay text"
-
-def grade_essay(essay_text):
- # replace this with code to grade the essay text using AI
- return "Band Score: 7.5"
-
-with gr.Blocks() as app:
- prompt = gr.Textbox(label="IELTS Writing Prompt",
- lines=3,
- placeholder="Enter IELTS writing prompt here...",
- value="IELTS Essay Task 1: Department Stores and Online Stores in Australia\n\n"
- "The table gives information about department and online stores in Australia in 2011.\n"
- "Summarise the information by selecting and reporting the main features, and make comparisons where relevant.\n\n"
- "Department Stores Online Stores\n"
- "Number of Businesses 67 368\n"
- "Profit (AUD) 807 863\n"
- "Sales Revenue (AUD) 12,700 13,400\n"
- "Growth .4% .6%")
-
- essay = gr.Textbox(label="Generated Essay",
- lines=10,
- placeholder="Generated essay will appear here...")
-
- for band_score in range(6, 10):
- btn = gr.Button(f"Generate Band {band_score} Essay")
- # bind the loop's band_score at definition time (avoids the late-binding
- # closure bug) and pass it through, since generate_essay needs both inputs
- btn.click(lambda p, b=band_score: generate_essay(p, b), inputs=prompt, outputs=essay)
-
- grade = gr.Button("Grade")
- grade_result = gr.Textbox(label="Grade Result",
- lines=2,
- placeholder="Graded result will appear here...")
-
- grade.click(grade_essay, inputs=essay, outputs=grade_result)
-
-app.launch()
-
diff --git a/spaces/acmyu/frame_interpolation_prototype/parameter.py b/spaces/acmyu/frame_interpolation_prototype/parameter.py
deleted file mode 100644
index dc24561793644aec142cbbccfd98c5fd90a2629e..0000000000000000000000000000000000000000
--- a/spaces/acmyu/frame_interpolation_prototype/parameter.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import argparse
-
-def str2bool(v):
- return v.lower() in ('true',) # note the comma: ('true') is a plain string, and `in` would do substring matching
-
-def get_parameters():
-
- parser = argparse.ArgumentParser()
-
- # Model hyper-parameters
- parser.add_argument('--model', type=str, default='sagan', choices=['sagan', 'qgan'])
- parser.add_argument('--adv_loss', type=str, default='wgan-gp', choices=['wgan-gp', 'hinge'])
- parser.add_argument('--imsize', type=int, default=32)
- parser.add_argument('--g_num', type=int, default=5)
- parser.add_argument('--z_dim', type=int, default=128)
- parser.add_argument('--g_conv_dim', type=int, default=64)
- parser.add_argument('--d_conv_dim', type=int, default=64)
- parser.add_argument('--lambda_gp', type=float, default=10)
- parser.add_argument('--version', type=str, default='sagan_1')
-
- # Training setting
- parser.add_argument('--total_step', type=int, default=100000, help='how many times to update the generator')
- parser.add_argument('--d_iters', type=float, default=5)
- parser.add_argument('--batch_size', type=int, default=64)
- parser.add_argument('--num_workers', type=int, default=2)
- parser.add_argument('--g_lr', type=float, default=0.0001)
- parser.add_argument('--d_lr', type=float, default=0.0004)
- parser.add_argument('--lr_decay', type=float, default=0.95)
- parser.add_argument('--beta1', type=float, default=0.0)
- parser.add_argument('--beta2', type=float, default=0.9)
-
- # using pretrained
- parser.add_argument('--pretrained_model', type=int, default=None)
-
- # Misc
- parser.add_argument('--train', type=str2bool, default=True)
- parser.add_argument('--parallel', type=str2bool, default=False)
- parser.add_argument('--dataset', type=str, default='cifar', choices=['cifar', 'lsun', 'celeb', 'frames'])
- parser.add_argument('--use_tensorboard', type=str2bool, default=False)
-
- # Path
- parser.add_argument('--image_path', type=str, default='../data')
- parser.add_argument('--log_path', type=str, default='./logs')
- parser.add_argument('--model_save_path', type=str, default='./models')
- parser.add_argument('--sample_path', type=str, default='./samples')
- parser.add_argument('--attn_path', type=str, default='./attn')
-
- # Step size
- parser.add_argument('--log_step', type=int, default=10)
- parser.add_argument('--sample_step', type=int, default=100)
- parser.add_argument('--model_save_step', type=float, default=1.0)
-
-
- return parser.parse_args()
\ No newline at end of file
diff --git a/spaces/adhisetiawan/anime-voice-generator/README.md b/spaces/adhisetiawan/anime-voice-generator/README.md
deleted file mode 100644
index 2e44ec5507a21c84647346865c876ce2b48db560..0000000000000000000000000000000000000000
--- a/spaces/adhisetiawan/anime-voice-generator/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Vits Models
-emoji: 🏃
-colorFrom: pink
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.17.0
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: sayashi/vits-models
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer/__init__.py b/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer/__init__.py
deleted file mode 100644
index 4287ca8617970fa8fc025b75cb319c7032706910..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Real-Time-Voice-Cloning/synthesizer/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#
\ No newline at end of file
diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
deleted file mode 100644
index b0657a5f0fd96c28ed09c94a3c1798de2fbcd2a6..0000000000000000000000000000000000000000
--- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py
+++ /dev/null
@@ -1,83 +0,0 @@
-"""
- pygments.formatters.pangomarkup
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Formatter for Pango markup output.
-
- :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pip._vendor.pygments.formatter import Formatter
-
-
-__all__ = ['PangoMarkupFormatter']
-
-
-_escape_table = {
- ord('&'): '&',
- ord('<'): '<',
-}
-
-
-def escape_special_chars(text, table=_escape_table):
- """Escape & and < for Pango Markup."""
- return text.translate(table)
-
-
-class PangoMarkupFormatter(Formatter):
- """
- Format tokens as Pango Markup code. It can then be rendered to an SVG.
-
- .. versionadded:: 2.9
- """
-
- name = 'Pango Markup'
- aliases = ['pango', 'pangomarkup']
- filenames = []
-
- def __init__(self, **options):
- Formatter.__init__(self, **options)
-
- self.styles = {}
-
- for token, style in self.style:
- start = ''
- end = ''
- if style['color']:
- start += '<span fgcolor="#%s">' % style['color']
- end = '</span>' + end
- if style['bold']:
- start += '<b>'
- end = '</b>' + end
- if style['italic']:
- start += '<i>'
- end = '</i>' + end
- if style['underline']:
- start += '<u>'
- end = '</u>' + end
- self.styles[token] = (start, end)
-
- def format_unencoded(self, tokensource, outfile):
- lastval = ''
- lasttype = None
-
- outfile.write('<tt>')
-
- for ttype, value in tokensource:
- while ttype not in self.styles:
- ttype = ttype.parent
- if ttype == lasttype:
- lastval += escape_special_chars(value)
- else:
- if lastval:
- stylebegin, styleend = self.styles[lasttype]
- outfile.write(stylebegin + lastval + styleend)
- lastval = escape_special_chars(value)
- lasttype = ttype
-
- if lastval:
- stylebegin, styleend = self.styles[lasttype]
- outfile.write(stylebegin + lastval + styleend)
-
- outfile.write('</tt>')
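A hedged usage sketch for the formatter above (using the vendored pygments for consistency with the imports in this file; the stand-alone `pygments` package works the same way):

from pip._vendor.pygments import highlight
from pip._vendor.pygments.lexers import PythonLexer

markup = highlight('print("hi")', PythonLexer(), PangoMarkupFormatter())
print(markup)   # '<tt>...</tt>' string, ready for pango_parse_markup or a GTK label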
diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/NamedNodeMap.pod b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/NamedNodeMap.pod
deleted file mode 100644
index 62c276272a8483b0bfc2966ba7a990ae96175363..0000000000000000000000000000000000000000
--- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/NamedNodeMap.pod
+++ /dev/null
@@ -1,130 +0,0 @@
-=head1 NAME
-
-XML::DOM::NamedNodeMap - A hash table interface for XML::DOM
-
-=head1 DESCRIPTION
-
-Objects implementing the NamedNodeMap interface are used to represent
-collections of nodes that can be accessed by name. Note that
-NamedNodeMap does not inherit from NodeList; NamedNodeMaps are not
-maintained in any particular order. Objects contained in an object
-implementing NamedNodeMap may also be accessed by an ordinal index, but
-this is simply to allow convenient enumeration of the contents of a
-NamedNodeMap, and does not imply that the DOM specifies an order to
-these Nodes.
-
-Note that in this implementation, the objects added to a NamedNodeMap
-are kept in order.
-
-=head2 METHODS
-
-=over 4
-
-=item getNamedItem (name)
-
-Retrieves a node specified by name.
-
-Return Value: A Node (of any type) with the specified name, or undef if
-the specified name did not identify any node in the map.
-
-=item setNamedItem (arg)
-
-Adds a node using its nodeName attribute.
-
-As the nodeName attribute is used to derive the name which
-the node must be stored under, multiple nodes of certain
-types (those that have a "special" string value) cannot be
-stored as the names would clash. This is seen as preferable
-to allowing nodes to be aliased.
-
-Parameters:
- I<arg> A node to store in a named node map.
-
-The node will later be accessible using the value of the nodeName
-attribute of the node. If a node with that name is
-already present in the map, it is replaced by the new one.
-
-Return Value: If the new Node replaces an existing node with the same
-name the previously existing Node is returned, otherwise undef is returned.
-
-DOMExceptions:
-
-=over 4
-
-=item * WRONG_DOCUMENT_ERR
-
-Raised if arg was created from a different document than the one that
-created the NamedNodeMap.
-
-=item * NO_MODIFICATION_ALLOWED_ERR
-
-Raised if this NamedNodeMap is readonly.
-
-=item * INUSE_ATTRIBUTE_ERR
-
-Raised if arg is an Attr that is already an attribute of another Element object.
-The DOM user must explicitly clone Attr nodes to re-use them in other elements.
-
-=back
-
-=item removeNamedItem (name)
-
-Removes a node specified by name. If the removed node is an
-Attr with a default value it is immediately replaced.
-
-Return Value: The node removed from the map or undef if no node with
-such a name exists.
-
-DOMException:
-
-=over 4
-
-=item * NOT_FOUND_ERR
-
-Raised if there is no node named name in the map.
-
-=back
-
-=item item (index)
-
-Returns the indexth item in the map. If index is greater than
-or equal to the number of nodes in the map, this returns undef.
-
-Return Value: The node at the indexth position in the NamedNodeMap, or
-undef if that is not a valid index.
-
-=item getLength
-
-Returns the number of nodes in the map. The range of valid child node
-indices is 0 to length-1 inclusive.
-
-=back
-
-=head2 Additional methods not in the DOM Spec
-
-=over 4
-
-=item getValues
-
-Returns a NodeList with the nodes contained in the NamedNodeMap.
-The NodeList is "live", in that it reflects changes made to the NamedNodeMap.
-
-When this method is called in a list context, it returns a regular perl list
-containing the values. Note that this list is not "live". E.g.
-
- @list = $map->getValues; # returns a perl list
- $nodelist = $map->getValues; # returns a NodeList (object ref.)
- for my $val ($map->getValues) # iterate over the values
-
-=item getChildIndex (node)
-
-Returns the index of the node in the NodeList as returned by getValues, or -1
-if the node is not in the NamedNodeMap.
-
-=item dispose
-
-Removes all circular references in this NamedNodeMap and its descendants so the
-objects can be claimed for garbage collection. The objects should not be used
-afterwards.
-
-=back
diff --git a/spaces/allknowingroger/Image-Models-Test83/README.md b/spaces/allknowingroger/Image-Models-Test83/README.md
deleted file mode 100644
index 68d25ba38dd1a66eb9143160c0e6fcfccced8ca2..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test83/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: true
-duplicated_from: allknowingroger/Image-Models-Test82
----
-
-
\ No newline at end of file
diff --git a/spaces/almakedon/faster-whisper-webui/src/conversion/hf_converter.py b/spaces/almakedon/faster-whisper-webui/src/conversion/hf_converter.py
deleted file mode 100644
index 6da4f0fd672d63b099f21d0498ba4001d23356f7..0000000000000000000000000000000000000000
--- a/spaces/almakedon/faster-whisper-webui/src/conversion/hf_converter.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# https://github.com/bayartsogt-ya/whisper-multiple-hf-datasets
-
-from copy import deepcopy
-import torch
-
-WHISPER_MAPPING = {
- "layers": "blocks",
- "fc1": "mlp.0",
- "fc2": "mlp.2",
- "final_layer_norm": "mlp_ln",
- "layers": "blocks",
- ".self_attn.q_proj": ".attn.query",
- ".self_attn.k_proj": ".attn.key",
- ".self_attn.v_proj": ".attn.value",
- ".self_attn_layer_norm": ".attn_ln",
- ".self_attn.out_proj": ".attn.out",
- ".encoder_attn.q_proj": ".cross_attn.query",
- ".encoder_attn.k_proj": ".cross_attn.key",
- ".encoder_attn.v_proj": ".cross_attn.value",
- ".encoder_attn_layer_norm": ".cross_attn_ln",
- ".encoder_attn.out_proj": ".cross_attn.out",
- "decoder.layer_norm.": "decoder.ln.",
- "encoder.layer_norm.": "encoder.ln_post.",
- "embed_tokens": "token_embedding",
- "encoder.embed_positions.weight": "encoder.positional_embedding",
- "decoder.embed_positions.weight": "decoder.positional_embedding",
- "layer_norm": "ln_post",
-}
-
-
-def rename_keys(s_dict):
- keys = list(s_dict.keys())
- for key in keys:
- new_key = key
- for k, v in WHISPER_MAPPING.items():
- if k in key:
- new_key = new_key.replace(k, v)
-
- print(f"{key} -> {new_key}")
-
- s_dict[new_key] = s_dict.pop(key)
- return s_dict
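On a fake two-key state dict (tensor values elided as None), the mapping above behaves like this; note that rename_keys also prints each old -> new pair as it goes:

fake = {"model.encoder.layers.0.fc1.weight": None,
        "model.decoder.embed_tokens.weight": None}
renamed = rename_keys(fake)
print(sorted(renamed))
# ['model.decoder.token_embedding.weight', 'model.encoder.blocks.0.mlp.0.weight']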
-
-
-def convert_hf_whisper(hf_model_name_or_path: str, whisper_state_path: str):
- from transformers import WhisperForConditionalGeneration
- transformer_model = WhisperForConditionalGeneration.from_pretrained(hf_model_name_or_path)
- config = transformer_model.config
-
- # first build dims
- dims = {
- 'n_mels': config.num_mel_bins,
- 'n_vocab': config.vocab_size,
- 'n_audio_ctx': config.max_source_positions,
- 'n_audio_state': config.d_model,
- 'n_audio_head': config.encoder_attention_heads,
- 'n_audio_layer': config.encoder_layers,
- 'n_text_ctx': config.max_target_positions,
- 'n_text_state': config.d_model,
- 'n_text_head': config.decoder_attention_heads,
- 'n_text_layer': config.decoder_layers
- }
-
- state_dict = deepcopy(transformer_model.model.state_dict())
- state_dict = rename_keys(state_dict)
-
- torch.save({"dims": dims, "model_state_dict": state_dict}, whisper_state_path)
\ No newline at end of file
diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/unix/pa_unix_util.c b/spaces/amarchheda/ChordDuplicate/portaudio/src/os/unix/pa_unix_util.c
deleted file mode 100644
index 459b2bef3eaccf52a892933ac42601327ca8d289..0000000000000000000000000000000000000000
--- a/spaces/amarchheda/ChordDuplicate/portaudio/src/os/unix/pa_unix_util.c
+++ /dev/null
@@ -1,710 +0,0 @@
-/*
- * $Id$
- * Portable Audio I/O Library
- * UNIX platform-specific support functions
- *
- * Based on the Open Source API proposed by Ross Bencina
- * Copyright (c) 1999-2000 Ross Bencina
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files
- * (the "Software"), to deal in the Software without restriction,
- * including without limitation the rights to use, copy, modify, merge,
- * publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so,
- * subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * The text above constitutes the entire PortAudio license; however,
- * the PortAudio community also makes the following non-binding requests:
- *
- * Any person wishing to distribute modifications to the Software is
- * requested to send the modifications to the original developer so that
- * they can be incorporated into the canonical version. It is also
- * requested that these non-binding requests be included along with the
- * license above.
- */
-
-/** @file
- @ingroup unix_src
-*/
-
-#include <pthread.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <time.h>
-#include <sys/time.h>
-#include <assert.h>
-#include <string.h> /* For memset */
-#include <math.h>
-#include <errno.h>
-
-#if defined(__APPLE__) && !defined(HAVE_MACH_ABSOLUTE_TIME)
-#define HAVE_MACH_ABSOLUTE_TIME
-#endif
-#ifdef HAVE_MACH_ABSOLUTE_TIME
-#include <mach/mach_time.h>
-#endif
-
-#include "pa_util.h"
-#include "pa_unix_util.h"
-#include "pa_debugprint.h"
-
-/*
- Track memory allocations to avoid leaks.
- */
-
-#if PA_TRACK_MEMORY
-static int numAllocations_ = 0;
-#endif
-
-
-void *PaUtil_AllocateMemory( long size )
-{
- void *result = malloc( size );
-
-#if PA_TRACK_MEMORY
- if( result != NULL ) numAllocations_ += 1;
-#endif
- return result;
-}
-
-
-void PaUtil_FreeMemory( void *block )
-{
- if( block != NULL )
- {
- free( block );
-#if PA_TRACK_MEMORY
- numAllocations_ -= 1;
-#endif
-
- }
-}
-
-
-int PaUtil_CountCurrentlyAllocatedBlocks( void )
-{
-#if PA_TRACK_MEMORY
- return numAllocations_;
-#else
- return 0;
-#endif
-}
-
-
-void Pa_Sleep( long msec )
-{
-#ifdef HAVE_NANOSLEEP
- struct timespec req = {0}, rem = {0};
- PaTime time = msec / 1.e3;
- req.tv_sec = (time_t)time;
- assert(time - req.tv_sec < 1.0);
- req.tv_nsec = (long)((time - req.tv_sec) * 1.e9);
- nanosleep(&req, &rem);
- /* XXX: Try sleeping the remaining time (contained in rem) if interrupted by a signal? */
-#else
- while( msec > 999 ) /* For OpenBSD and IRIX, argument */
- { /* to usleep must be < 1000000. */
- usleep( 999000 );
- msec -= 999;
- }
- usleep( msec * 1000 );
-#endif
-}
-
-#ifdef HAVE_MACH_ABSOLUTE_TIME
-/*
- Discussion on the CoreAudio mailing list suggests that calling
- gettimeofday (or anything else in the BSD layer) is not real-time
- safe, so we use mach_absolute_time on OSX. This implementation is
- based on these two links:
-
- Technical Q&A QA1398 - Mach Absolute Time Units
- http://developer.apple.com/mac/library/qa/qa2004/qa1398.html
-
- Tutorial: Performance and Time.
- http://www.macresearch.org/tutorial_performance_and_time
-*/
-
-/* Scaler to convert the result of mach_absolute_time to seconds */
-static double machSecondsConversionScaler_ = 0.0;
-#endif
-
-void PaUtil_InitializeClock( void )
-{
-#ifdef HAVE_MACH_ABSOLUTE_TIME
- mach_timebase_info_data_t info;
- kern_return_t err = mach_timebase_info( &info );
- if( err == 0 )
- machSecondsConversionScaler_ = 1e-9 * (double) info.numer / (double) info.denom;
-#endif
-}
-
-
-PaTime PaUtil_GetTime( void )
-{
-#ifdef HAVE_MACH_ABSOLUTE_TIME
- return mach_absolute_time() * machSecondsConversionScaler_;
-#elif defined(HAVE_CLOCK_GETTIME)
- struct timespec tp;
- clock_gettime(CLOCK_REALTIME, &tp);
- return (PaTime)(tp.tv_sec + tp.tv_nsec * 1e-9);
-#else
- struct timeval tv;
- gettimeofday( &tv, NULL );
- return (PaTime) tv.tv_usec * 1e-6 + tv.tv_sec;
-#endif
-}
-
-PaError PaUtil_InitializeThreading( PaUtilThreading *threading )
-{
- (void) paUtilErr_;
- return paNoError;
-}
-
-void PaUtil_TerminateThreading( PaUtilThreading *threading )
-{
-}
-
-PaError PaUtil_StartThreading( PaUtilThreading *threading, void *(*threadRoutine)(void *), void *data )
-{
- pthread_create( &threading->callbackThread, NULL, threadRoutine, data );
- return paNoError;
-}
-
-PaError PaUtil_CancelThreading( PaUtilThreading *threading, int wait, PaError *exitResult )
-{
- PaError result = paNoError;
- void *pret;
-
- if( exitResult )
- *exitResult = paNoError;
-
-    /* If pthread_cancel is not supported (e.g. on the Android platform), this whole function can wait
-       indefinitely if the worker thread (callbackThread) hasn't received any stop signal from outside;
-       please keep this in mind when considering using PaUtil_CancelThreading.
-    */
-#ifdef PTHREAD_CANCELED
- /* Only kill the thread if it isn't in the process of stopping (flushing adaptation buffers) */
- if( !wait )
- pthread_cancel( threading->callbackThread ); /* XXX: Safe to call this if the thread has exited on its own? */
-#endif
- pthread_join( threading->callbackThread, &pret );
-
-#ifdef PTHREAD_CANCELED
- if( pret && PTHREAD_CANCELED != pret )
-#else
- /* !wait means the thread may have been canceled */
- if( pret && wait )
-#endif
- {
- if( exitResult )
- *exitResult = *(PaError *) pret;
- free( pret );
- }
-
- return result;
-}
-
-/* Threading */
-/* paUnixMainThread
- * We have to be a bit careful with defining this global variable,
- * as explained below. */
-#ifdef __APPLE__
-/* apple/gcc has a "problem" with global vars and dynamic libs.
- Initializing it seems to fix the problem.
- Described a bit in this thread:
- http://gcc.gnu.org/ml/gcc/2005-06/msg00179.html
-*/
-pthread_t paUnixMainThread = 0;
-#else
-/*pthreads are opaque. We don't know that assigning it an int value
- always makes sense, so we don't initialize it unless we have to.*/
-pthread_t paUnixMainThread;
-#endif
-
-PaError PaUnixThreading_Initialize( void )
-{
- paUnixMainThread = pthread_self();
- return paNoError;
-}
-
-static PaError BoostPriority( PaUnixThread* self )
-{
- PaError result = paNoError;
- struct sched_param spm = { 0 };
- /* Priority should only matter between contending FIFO threads? */
- spm.sched_priority = 1;
-
- assert( self );
-
- if( pthread_setschedparam( self->thread, SCHED_FIFO, &spm ) != 0 )
- {
- PA_UNLESS( errno == EPERM, paInternalError ); /* Lack permission to raise priority */
- PA_DEBUG(( "Failed bumping priority\n" ));
- result = 0;
- }
- else
- {
- result = 1; /* Success */
- }
-error:
- return result;
-}
-
-PaError PaUnixThread_New( PaUnixThread* self, void* (*threadFunc)( void* ), void* threadArg, PaTime waitForChild,
- int rtSched )
-{
- PaError result = paNoError;
- pthread_attr_t attr;
- int started = 0;
-
- memset( self, 0, sizeof (PaUnixThread) );
- PaUnixMutex_Initialize( &self->mtx );
- PA_ASSERT_CALL( pthread_cond_init( &self->cond, NULL ), 0 );
-
- self->parentWaiting = 0 != waitForChild;
-
- /* Spawn thread */
-
-/* Temporarily disabled since we should test during configuration for presence of required mman.h header */
-#if 0
-#if defined _POSIX_MEMLOCK && (_POSIX_MEMLOCK != -1)
- if( rtSched )
- {
- if( mlockall( MCL_CURRENT | MCL_FUTURE ) < 0 )
- {
- int savedErrno = errno; /* In case errno gets overwritten */
- assert( savedErrno != EINVAL ); /* Most likely a programmer error */
- PA_UNLESS( (savedErrno == EPERM), paInternalError );
- PA_DEBUG(( "%s: Failed locking memory\n", __FUNCTION__ ));
- }
- else
- PA_DEBUG(( "%s: Successfully locked memory\n", __FUNCTION__ ));
- }
-#endif
-#endif
-
- PA_UNLESS( !pthread_attr_init( &attr ), paInternalError );
- /* Priority relative to other processes */
- PA_UNLESS( !pthread_attr_setscope( &attr, PTHREAD_SCOPE_SYSTEM ), paInternalError );
-
- PA_UNLESS( !pthread_create( &self->thread, &attr, threadFunc, threadArg ), paInternalError );
- started = 1;
-
- if( rtSched )
- {
-#if 0
- if( self->useWatchdog )
- {
- int err;
- struct sched_param wdSpm = { 0 };
- /* Launch watchdog, watchdog sets callback thread priority */
- int prio = PA_MIN( self->rtPrio + 4, sched_get_priority_max( SCHED_FIFO ) );
- wdSpm.sched_priority = prio;
-
- PA_UNLESS( !pthread_attr_init( &attr ), paInternalError );
- PA_UNLESS( !pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED ), paInternalError );
- PA_UNLESS( !pthread_attr_setscope( &attr, PTHREAD_SCOPE_SYSTEM ), paInternalError );
- PA_UNLESS( !pthread_attr_setschedpolicy( &attr, SCHED_FIFO ), paInternalError );
- PA_UNLESS( !pthread_attr_setschedparam( &attr, &wdSpm ), paInternalError );
- if( (err = pthread_create( &self->watchdogThread, &attr, &WatchdogFunc, self )) )
- {
- PA_UNLESS( err == EPERM, paInternalError );
- /* Permission error, go on without realtime privileges */
- PA_DEBUG(( "Failed bumping priority\n" ));
- }
- else
- {
- int policy;
- self->watchdogRunning = 1;
- PA_ENSURE_SYSTEM( pthread_getschedparam( self->watchdogThread, &policy, &wdSpm ), 0 );
- /* Check if priority is right, policy could potentially differ from SCHED_FIFO (but that's alright) */
- if( wdSpm.sched_priority != prio )
- {
- PA_DEBUG(( "Watchdog priority not set correctly (%d)\n", wdSpm.sched_priority ));
- PA_ENSURE( paInternalError );
- }
- }
- }
- else
-#endif
- PA_ENSURE( BoostPriority( self ) );
-
- {
- int policy;
- struct sched_param spm;
- pthread_getschedparam(self->thread, &policy, &spm);
- }
- }
-
- if( self->parentWaiting )
- {
- PaTime till;
- struct timespec ts;
- int res = 0;
- PaTime now;
-
- PA_ENSURE( PaUnixMutex_Lock( &self->mtx ) );
-
- /* Wait for stream to be started */
- now = PaUtil_GetTime();
- till = now + waitForChild;
-
- while( self->parentWaiting && !res )
- {
- if( waitForChild > 0 )
- {
- ts.tv_sec = (time_t) floor( till );
- ts.tv_nsec = (long) ((till - floor( till )) * 1e9);
- res = pthread_cond_timedwait( &self->cond, &self->mtx.mtx, &ts );
- }
- else
- {
- res = pthread_cond_wait( &self->cond, &self->mtx.mtx );
- }
- }
-
- PA_ENSURE( PaUnixMutex_Unlock( &self->mtx ) );
-
- PA_UNLESS( !res || ETIMEDOUT == res, paInternalError );
- PA_DEBUG(( "%s: Waited for %g seconds for stream to start\n", __FUNCTION__, PaUtil_GetTime() - now ));
- if( ETIMEDOUT == res )
- {
- PA_ENSURE( paTimedOut );
- }
- }
-
-end:
- return result;
-error:
- if( started )
- {
- PaUnixThread_Terminate( self, 0, NULL );
- }
-
- goto end;
-}
-
-PaError PaUnixThread_Terminate( PaUnixThread* self, int wait, PaError* exitResult )
-{
- PaError result = paNoError;
- void* pret;
-
- if( exitResult )
- {
- *exitResult = paNoError;
- }
-#if 0
- if( watchdogExitResult )
- *watchdogExitResult = paNoError;
-
- if( th->watchdogRunning )
- {
- pthread_cancel( th->watchdogThread );
- PA_ENSURE_SYSTEM( pthread_join( th->watchdogThread, &pret ), 0 );
-
- if( pret && pret != PTHREAD_CANCELED )
- {
- if( watchdogExitResult )
- *watchdogExitResult = *(PaError *) pret;
- free( pret );
- }
- }
-#endif
-
- /* Only kill the thread if it isn't in the process of stopping (flushing adaptation buffers) */
- /* TODO: Make join time out */
- self->stopRequested = wait;
- if( !wait )
- {
- PA_DEBUG(( "%s: Canceling thread %d\n", __FUNCTION__, self->thread ));
- /* XXX: Safe to call this if the thread has exited on its own? */
-#ifdef PTHREAD_CANCELED
- pthread_cancel( self->thread );
-#endif
- }
- PA_DEBUG(( "%s: Joining thread %d\n", __FUNCTION__, self->thread ));
- PA_ENSURE_SYSTEM( pthread_join( self->thread, &pret ), 0 );
-
-#ifdef PTHREAD_CANCELED
- if( pret && PTHREAD_CANCELED != pret )
-#else
- /* !wait means the thread may have been canceled */
- if( pret && wait )
-#endif
- {
- if( exitResult )
- {
- *exitResult = *(PaError*)pret;
- }
- free( pret );
- }
-
-error:
- PA_ASSERT_CALL( PaUnixMutex_Terminate( &self->mtx ), paNoError );
- PA_ASSERT_CALL( pthread_cond_destroy( &self->cond ), 0 );
-
- return result;
-}
-
-PaError PaUnixThread_PrepareNotify( PaUnixThread* self )
-{
- PaError result = paNoError;
- PA_UNLESS( self->parentWaiting, paInternalError );
-
- PA_ENSURE( PaUnixMutex_Lock( &self->mtx ) );
- self->locked = 1;
-
-error:
- return result;
-}
-
-PaError PaUnixThread_NotifyParent( PaUnixThread* self )
-{
- PaError result = paNoError;
- PA_UNLESS( self->parentWaiting, paInternalError );
-
- if( !self->locked )
- {
- PA_ENSURE( PaUnixMutex_Lock( &self->mtx ) );
- self->locked = 1;
- }
- self->parentWaiting = 0;
- pthread_cond_signal( &self->cond );
- PA_ENSURE( PaUnixMutex_Unlock( &self->mtx ) );
- self->locked = 0;
-
-error:
- return result;
-}
-
-int PaUnixThread_StopRequested( PaUnixThread* self )
-{
- return self->stopRequested;
-}
-
-PaError PaUnixMutex_Initialize( PaUnixMutex* self )
-{
- PaError result = paNoError;
- PA_ASSERT_CALL( pthread_mutex_init( &self->mtx, NULL ), 0 );
- return result;
-}
-
-PaError PaUnixMutex_Terminate( PaUnixMutex* self )
-{
- PaError result = paNoError;
- PA_ASSERT_CALL( pthread_mutex_destroy( &self->mtx ), 0 );
- return result;
-}
-
-/** Lock mutex.
- *
- * We're disabling thread cancellation while the thread is holding a lock, so mutexes are
- * properly unlocked at termination time.
- */
-PaError PaUnixMutex_Lock( PaUnixMutex* self )
-{
- PaError result = paNoError;
-
-#ifdef PTHREAD_CANCEL
- int oldState;
- PA_ENSURE_SYSTEM( pthread_setcancelstate( PTHREAD_CANCEL_DISABLE, &oldState ), 0 );
-#endif
- PA_ENSURE_SYSTEM( pthread_mutex_lock( &self->mtx ), 0 );
-
-error:
- return result;
-}
-
-/** Unlock mutex.
- *
- * Thread cancellation is enabled again after the mutex is properly unlocked.
- */
-PaError PaUnixMutex_Unlock( PaUnixMutex* self )
-{
- PaError result = paNoError;
-
- PA_ENSURE_SYSTEM( pthread_mutex_unlock( &self->mtx ), 0 );
-#ifdef PTHREAD_CANCEL
- int oldState;
- PA_ENSURE_SYSTEM( pthread_setcancelstate( PTHREAD_CANCEL_ENABLE, &oldState ), 0 );
-#endif
-
-error:
- return result;
-}
-
-
-#if 0
-static void OnWatchdogExit( void *userData )
-{
- PaAlsaThreading *th = (PaAlsaThreading *) userData;
- struct sched_param spm = { 0 };
- assert( th );
-
- PA_ASSERT_CALL( pthread_setschedparam( th->callbackThread, SCHED_OTHER, &spm ), 0 ); /* Lower before exiting */
- PA_DEBUG(( "Watchdog exiting\n" ));
-}
-
-static void *WatchdogFunc( void *userData )
-{
- PaError result = paNoError, *pres = NULL;
- int err;
- PaAlsaThreading *th = (PaAlsaThreading *) userData;
- unsigned intervalMsec = 500;
- const PaTime maxSeconds = 3.; /* Max seconds between callbacks */
- PaTime timeThen = PaUtil_GetTime(), timeNow, timeElapsed, cpuTimeThen, cpuTimeNow, cpuTimeElapsed;
- double cpuLoad, avgCpuLoad = 0.;
- int throttled = 0;
-
- assert( th );
-
- /* Execute OnWatchdogExit when exiting */
- pthread_cleanup_push( &OnWatchdogExit, th );
-
- /* Boost priority of callback thread */
- PA_ENSURE( result = BoostPriority( th ) );
- if( !result )
- {
- /* Boost failed, might as well exit */
- pthread_exit( NULL );
- }
-
- cpuTimeThen = th->callbackCpuTime;
- {
- int policy;
- struct sched_param spm = { 0 };
- pthread_getschedparam( pthread_self(), &policy, &spm );
- PA_DEBUG(( "%s: Watchdog priority is %d\n", __FUNCTION__, spm.sched_priority ));
- }
-
- while( 1 )
- {
- double lowpassCoeff = 0.9, lowpassCoeff1 = 0.99999 - lowpassCoeff;
-
- /* Test before and after in case whatever underlying sleep call isn't interrupted by pthread_cancel */
- pthread_testcancel();
- Pa_Sleep( intervalMsec );
- pthread_testcancel();
-
- if( PaUtil_GetTime() - th->callbackTime > maxSeconds )
- {
- PA_DEBUG(( "Watchdog: Terminating callback thread\n" ));
- /* Tell thread to terminate */
- err = pthread_kill( th->callbackThread, SIGKILL );
- pthread_exit( NULL );
- }
-
- PA_DEBUG(( "%s: PortAudio reports CPU load: %g\n", __FUNCTION__, PaUtil_GetCpuLoad( th->cpuLoadMeasurer ) ));
-
- /* Check if we should throttle, or unthrottle :P */
- cpuTimeNow = th->callbackCpuTime;
- cpuTimeElapsed = cpuTimeNow - cpuTimeThen;
- cpuTimeThen = cpuTimeNow;
-
- timeNow = PaUtil_GetTime();
- timeElapsed = timeNow - timeThen;
- timeThen = timeNow;
- cpuLoad = cpuTimeElapsed / timeElapsed;
- avgCpuLoad = avgCpuLoad * lowpassCoeff + cpuLoad * lowpassCoeff1;
- /*
- if( throttled )
- PA_DEBUG(( "Watchdog: CPU load: %g, %g\n", avgCpuLoad, cpuTimeElapsed ));
- */
- if( PaUtil_GetCpuLoad( th->cpuLoadMeasurer ) > .925 )
- {
- static int policy;
- static struct sched_param spm = { 0 };
- static const struct sched_param defaultSpm = { 0 };
- PA_DEBUG(( "%s: Throttling audio thread, priority %d\n", __FUNCTION__, spm.sched_priority ));
-
- pthread_getschedparam( th->callbackThread, &policy, &spm );
- if( !pthread_setschedparam( th->callbackThread, SCHED_OTHER, &defaultSpm ) )
- {
- throttled = 1;
- }
- else
- PA_DEBUG(( "Watchdog: Couldn't lower priority of audio thread: %s\n", strerror( errno ) ));
-
- /* Give other processes a go, before raising priority again */
- PA_DEBUG(( "%s: Watchdog sleeping for %lu msecs before unthrottling\n", __FUNCTION__, th->throttledSleepTime ));
- Pa_Sleep( th->throttledSleepTime );
-
- /* Reset callback priority */
- if( pthread_setschedparam( th->callbackThread, SCHED_FIFO, &spm ) != 0 )
- {
- PA_DEBUG(( "%s: Couldn't raise priority of audio thread: %s\n", __FUNCTION__, strerror( errno ) ));
- }
-
- if( PaUtil_GetCpuLoad( th->cpuLoadMeasurer ) >= .99 )
- intervalMsec = 50;
- else
- intervalMsec = 100;
-
- /*
- lowpassCoeff = .97;
- lowpassCoeff1 = .99999 - lowpassCoeff;
- */
- }
- else if( throttled && avgCpuLoad < .8 )
- {
- intervalMsec = 500;
- throttled = 0;
-
- /*
- lowpassCoeff = .9;
- lowpassCoeff1 = .99999 - lowpassCoeff;
- */
- }
- }
-
- pthread_cleanup_pop( 1 ); /* Execute cleanup on exit */
-
-error:
- /* Shouldn't get here in the normal case */
-
- /* Pass on error code */
- pres = malloc( sizeof (PaError) );
- *pres = result;
-
- pthread_exit( pres );
-}
-
-static void CallbackUpdate( PaAlsaThreading *th )
-{
- th->callbackTime = PaUtil_GetTime();
- th->callbackCpuTime = PaUtil_GetCpuLoad( th->cpuLoadMeasurer );
-}
-
-/*
-static void *CanaryFunc( void *userData )
-{
- const unsigned intervalMsec = 1000;
- PaUtilThreading *th = (PaUtilThreading *) userData;
-
- while( 1 )
- {
- th->canaryTime = PaUtil_GetTime();
-
- pthread_testcancel();
- Pa_Sleep( intervalMsec );
- }
-
- pthread_exit( NULL );
-}
-*/
-#endif
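`PaUnixThread_New` and `PaUnixThread_NotifyParent` together implement a parent-waits-for-child startup handshake: the parent blocks on a condition variable (optionally with a deadline derived from `waitForChild`), and the child clears `parentWaiting` and signals once the stream is up. A Python stand-in for the same pattern, not part of PortAudio:

```python
# Sketch of the startup handshake above: the parent waits (with a timeout)
# until the child signals readiness; the predicate loop guards against
# spurious wakeups, as with pthread_cond_timedwait.
import threading

cond = threading.Condition()
parent_waiting = True

def child():
    global parent_waiting
    # child-side stream initialization would happen here
    with cond:
        parent_waiting = False
        cond.notify()                   # ~ pthread_cond_signal

t = threading.Thread(target=child)
with cond:                              # parent holds the lock, as with PaUnixMutex_Lock
    t.start()
    while parent_waiting:
        if not cond.wait(timeout=1.0):  # ~ pthread_cond_timedwait hitting ETIMEDOUT
            raise TimeoutError("stream did not start")  # ~ paTimedOut
t.join()
```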
diff --git a/spaces/antonovmaxim/text-generation-webui-space/modules/logging_colors.py b/spaces/antonovmaxim/text-generation-webui-space/modules/logging_colors.py
deleted file mode 100644
index 5c9714f7cd08f88f30335dfc0b7a694879414a68..0000000000000000000000000000000000000000
--- a/spaces/antonovmaxim/text-generation-webui-space/modules/logging_colors.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copied from https://stackoverflow.com/a/1336640
-
-import logging
-import platform
-
-
-def add_coloring_to_emit_windows(fn):
- # add methods we need to the class
- def _out_handle(self):
- import ctypes
- return ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
- out_handle = property(_out_handle)
-
- def _set_color(self, code):
- import ctypes
-
- # Constants from the Windows API
- self.STD_OUTPUT_HANDLE = -11
- hdl = ctypes.windll.kernel32.GetStdHandle(self.STD_OUTPUT_HANDLE)
- ctypes.windll.kernel32.SetConsoleTextAttribute(hdl, code)
-
- setattr(logging.StreamHandler, '_set_color', _set_color)
-
- def new(*args):
- FOREGROUND_BLUE = 0x0001 # text color contains blue.
- FOREGROUND_GREEN = 0x0002 # text color contains green.
- FOREGROUND_RED = 0x0004 # text color contains red.
- FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
- FOREGROUND_WHITE = FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED
- # winbase.h
- # STD_INPUT_HANDLE = -10
- # STD_OUTPUT_HANDLE = -11
- # STD_ERROR_HANDLE = -12
-
- # wincon.h
- # FOREGROUND_BLACK = 0x0000
- FOREGROUND_BLUE = 0x0001
- FOREGROUND_GREEN = 0x0002
- # FOREGROUND_CYAN = 0x0003
- FOREGROUND_RED = 0x0004
- FOREGROUND_MAGENTA = 0x0005
- FOREGROUND_YELLOW = 0x0006
- # FOREGROUND_GREY = 0x0007
- FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.
-
- # BACKGROUND_BLACK = 0x0000
- # BACKGROUND_BLUE = 0x0010
- # BACKGROUND_GREEN = 0x0020
- # BACKGROUND_CYAN = 0x0030
- # BACKGROUND_RED = 0x0040
- # BACKGROUND_MAGENTA = 0x0050
- BACKGROUND_YELLOW = 0x0060
- # BACKGROUND_GREY = 0x0070
- BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
-
- levelno = args[1].levelno
- if (levelno >= 50):
- color = BACKGROUND_YELLOW | FOREGROUND_RED | FOREGROUND_INTENSITY | BACKGROUND_INTENSITY
- elif (levelno >= 40):
- color = FOREGROUND_RED | FOREGROUND_INTENSITY
- elif (levelno >= 30):
- color = FOREGROUND_YELLOW | FOREGROUND_INTENSITY
- elif (levelno >= 20):
- color = FOREGROUND_GREEN
- elif (levelno >= 10):
- color = FOREGROUND_MAGENTA
- else:
- color = FOREGROUND_WHITE
- args[0]._set_color(color)
-
- ret = fn(*args)
- args[0]._set_color(FOREGROUND_WHITE)
- # print "after"
- return ret
- return new
-
-
-def add_coloring_to_emit_ansi(fn):
- # add methods we need to the class
- def new(*args):
- levelno = args[1].levelno
- if (levelno >= 50):
- color = '\x1b[31m' # red
- elif (levelno >= 40):
- color = '\x1b[31m' # red
- elif (levelno >= 30):
- color = '\x1b[33m' # yellow
- elif (levelno >= 20):
- color = '\x1b[32m' # green
- elif (levelno >= 10):
- color = '\x1b[35m' # pink
- else:
- color = '\x1b[0m' # normal
- args[1].msg = color + args[1].msg + '\x1b[0m' # normal
- # print "after"
- return fn(*args)
- return new
-
-
-if platform.system() == 'Windows':
- # Windows does not support ANSI escapes and we are using API calls to set the console color
- logging.StreamHandler.emit = add_coloring_to_emit_windows(logging.StreamHandler.emit)
-else:
- # all non-Windows platforms are supporting ANSI escapes so we use them
- logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
- # log = logging.getLogger()
- # log.addFilter(log_filter())
- # //hdlr = logging.StreamHandler()
- # //hdlr.setFormatter(formatter())
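Because the module patches `logging.StreamHandler.emit` at import time, importing it once is enough to color all subsequent log output. A hedged usage sketch (the import path assumes this repository's `modules/` layout):

```python
# Importing the module applies the monkey-patch; ordinary logging calls
# then come out colored (ANSI escapes off Windows, console API on Windows).
import logging

import modules.logging_colors  # noqa: F401  (import has the side effect)

logging.basicConfig(level=logging.DEBUG)
logging.debug("pink")
logging.info("green")
logging.warning("yellow")
logging.error("red")
```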
diff --git a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/GPTQ_loader.py b/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/GPTQ_loader.py
deleted file mode 100644
index df18df569981cb308217f3b6a6f8dede5cc8b3db..0000000000000000000000000000000000000000
--- a/spaces/antonovmaxim/text-generation-webui-space/text-generation-webui-main/modules/GPTQ_loader.py
+++ /dev/null
@@ -1,199 +0,0 @@
-import inspect
-import logging
-import re
-import sys
-from pathlib import Path
-
-import accelerate
-import torch
-import transformers
-from transformers import AutoConfig, AutoModelForCausalLM
-
-import modules.shared as shared
-
-sys.path.insert(0, str(Path("repositories/GPTQ-for-LLaMa")))
-import llama_inference_offload
-
-try:
- from modelutils import find_layers
-except ImportError:
- from utils import find_layers
-
-try:
- from quant import make_quant
- is_triton = False
-except ImportError:
- import quant
- is_triton = True
-
-
-# This function is a replacement for the load_quant function in the
-# GPTQ-for-LLaMa repository. It supports more models and branches.
-def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exclude_layers=['lm_head'], kernel_switch_threshold=128, eval=True):
-
- def noop(*args, **kwargs):
- pass
-
- config = AutoConfig.from_pretrained(model)
- torch.nn.init.kaiming_uniform_ = noop
- torch.nn.init.uniform_ = noop
- torch.nn.init.normal_ = noop
-
- torch.set_default_dtype(torch.half)
- transformers.modeling_utils._init_weights = False
- torch.set_default_dtype(torch.half)
- model = AutoModelForCausalLM.from_config(config)
- torch.set_default_dtype(torch.float)
- if eval:
- model = model.eval()
- layers = find_layers(model)
- for name in exclude_layers:
- if name in layers:
- del layers[name]
-
- if not is_triton:
- gptq_args = inspect.getfullargspec(make_quant).args
-
- make_quant_kwargs = {
- 'module': model,
- 'names': layers,
- 'bits': wbits,
- }
- if 'groupsize' in gptq_args:
- make_quant_kwargs['groupsize'] = groupsize
- if 'faster' in gptq_args:
- make_quant_kwargs['faster'] = faster_kernel
- if 'kernel_switch_threshold' in gptq_args:
- make_quant_kwargs['kernel_switch_threshold'] = kernel_switch_threshold
-
- make_quant(**make_quant_kwargs)
- else:
- quant.make_quant_linear(model, layers, wbits, groupsize)
-
- del layers
-
- if checkpoint.endswith('.safetensors'):
- from safetensors.torch import load_file as safe_load
- model.load_state_dict(safe_load(checkpoint), strict=False)
- else:
- model.load_state_dict(torch.load(checkpoint), strict=False)
-
- if is_triton:
- if shared.args.quant_attn:
- quant.make_quant_attn(model)
- if eval and shared.args.fused_mlp:
- quant.make_fused_mlp(model)
-
- if shared.args.warmup_autotune:
- quant.autotune_warmup_linear(model, transpose=not eval)
- if eval and shared.args.fused_mlp:
- quant.autotune_warmup_fused(model)
-
- model.seqlen = 2048
- return model
-
-
-# Used to locate the .pt/.safetensors quantized file
-def find_quantized_model_file(model_name):
- path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
- pt_path = None
- priority_name_list = [
- Path(f'{shared.args.model_dir}/{model_name}{hyphen}{shared.args.wbits}bit{group}{ext}')
- for group in ([f'-{shared.args.groupsize}g', ''] if shared.args.groupsize > 0 else [''])
- for ext in ['.safetensors', '.pt']
- for hyphen in ['-', f'/{model_name}-', '/']
- ]
- for path in priority_name_list:
- if path.exists():
- pt_path = path
- break
-
- # If the model hasn't been found with a well-behaved name, pick the last .pt
- # or the last .safetensors found in its folder as a last resort
- if not pt_path:
- found_pts = list(path_to_model.glob("*.pt"))
- found_safetensors = list(path_to_model.glob("*.safetensors"))
- pt_path = None
-
- if len(found_pts) > 0:
- if len(found_pts) > 1:
- logging.warning('More than one .pt model has been found. The last one will be selected. It could be wrong.')
-
- pt_path = found_pts[-1]
- elif len(found_safetensors) > 0:
-            if len(found_safetensors) > 1:
- logging.warning('More than one .safetensors model has been found. The last one will be selected. It could be wrong.')
-
- pt_path = found_safetensors[-1]
-
- return pt_path
-
-
-# The function that loads the model in modules/models.py
-def load_quantized(model_name):
-
- # Find the model type
- if not shared.args.model_type:
- name = model_name.lower()
- if any((k in name for k in ['llama', 'alpaca', 'vicuna', 'llava'])):
- model_type = 'llama'
- elif any((k in name for k in ['opt-', 'galactica'])):
- model_type = 'opt'
- elif any((k in name for k in ['gpt-j', 'pygmalion-6b'])):
- model_type = 'gptj'
- else:
- logging.error("Can't determine model type from model name. Please specify it manually using --model_type argument")
- exit()
- else:
- model_type = shared.args.model_type.lower()
-
- # Select the appropriate load_quant function
- if shared.args.pre_layer and model_type == 'llama':
- load_quant = llama_inference_offload.load_quant
- elif model_type in ('llama', 'opt', 'gptj'):
- if shared.args.pre_layer:
- logging.warning("Ignoring --pre_layer because it only works for llama model type.")
-
- load_quant = _load_quant
- else:
- logging.error("Unknown pre-quantized model type specified. Only 'llama', 'opt' and 'gptj' are supported")
- exit()
-
- # Find the quantized model weights file (.pt/.safetensors)
- path_to_model = Path(f'{shared.args.model_dir}/{model_name}')
- pt_path = find_quantized_model_file(model_name)
- if not pt_path:
- logging.error("Could not find the quantized model in .pt or .safetensors format, exiting...")
- exit()
- else:
- logging.info(f"Found the following quantized model: {pt_path}")
-
- # qwopqwop200's offload
- if model_type == 'llama' and shared.args.pre_layer:
- model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, shared.args.pre_layer)
- else:
- threshold = False if model_type == 'gptj' else 128
- model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, kernel_switch_threshold=threshold)
-
- # accelerate offload (doesn't work properly)
- if shared.args.gpu_memory or torch.cuda.device_count() > 1:
- if shared.args.gpu_memory:
- memory_map = list(map(lambda x: x.strip(), shared.args.gpu_memory))
- max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB'
- max_memory = {}
- for i in range(len(memory_map)):
- max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i]
- max_memory['cpu'] = max_cpu_memory
- else:
- max_memory = accelerate.utils.get_balanced_memory(model)
-
- device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"])
-        logging.info("Using the following device map for the quantized model: %s", device_map)
- # https://huggingface.co/docs/accelerate/package_reference/big_modeling#accelerate.dispatch_model
- model = accelerate.dispatch_model(model, device_map=device_map, offload_buffers=True)
-
- # No offload
- elif not shared.args.cpu:
- model = model.to(torch.device('cuda:0'))
-
- return model
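The `max_memory` map built near the end of `load_quantized` gives accelerate one capacity string per GPU index plus a `"cpu"` entry; bare numbers from `--gpu-memory` get a `GiB` suffix, while values already ending in `ib` pass through. A standalone sketch of that normalization with hypothetical inputs:

```python
# Normalization used for accelerate's max_memory above; the input values
# here are hypothetical stand-ins for shared.args.gpu_memory/cpu_memory.
import re

gpu_memory = ["10", "8GiB"]
cpu_memory = None

max_memory = {
    i: m if re.match(r".*ib$", m.lower()) else f"{m}GiB"
    for i, m in enumerate(s.strip() for s in gpu_memory)
}
max_memory["cpu"] = cpu_memory.strip() if cpu_memory is not None else "99GiB"
print(max_memory)  # {0: '10GiB', 1: '8GiB', 'cpu': '99GiB'}
```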
diff --git a/spaces/ardha27/rvc_TTS/vc_infer_pipeline.py b/spaces/ardha27/rvc_TTS/vc_infer_pipeline.py
deleted file mode 100644
index ed2aacd1866379563006e3cf4dd40472f7ab4692..0000000000000000000000000000000000000000
--- a/spaces/ardha27/rvc_TTS/vc_infer_pipeline.py
+++ /dev/null
@@ -1,451 +0,0 @@
-import os
-import sys
-import traceback
-from functools import lru_cache
-from time import time as ttime
-
-import faiss
-import librosa
-import numpy as np
-import parselmouth
-import pyworld
-import torch
-import torch.nn.functional as F
-import torchcrepe
-from scipy import signal
-
-now_dir = os.getcwd()
-sys.path.append(now_dir)
-
-bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
-
-input_audio_path2wav = {}
-
-
-@lru_cache
-def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
- audio = input_audio_path2wav[input_audio_path]
- f0, t = pyworld.harvest(
- audio,
- fs=fs,
- f0_ceil=f0max,
- f0_floor=f0min,
- frame_period=frame_period,
- )
- f0 = pyworld.stonemask(audio, f0, t, fs)
- return f0
-
-
-def change_rms(data1, sr1, data2, sr2, rate):  # data1: input audio, data2: output audio, rate: weight of data2 in the mix
- # print(data1.max(),data2.max())
- rms1 = librosa.feature.rms(
- y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
-    )  # one RMS point every half second
- rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
- rms1 = torch.from_numpy(rms1)
- rms1 = F.interpolate(
- rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
- ).squeeze()
- rms2 = torch.from_numpy(rms2)
- rms2 = F.interpolate(
- rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
- ).squeeze()
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
- data2 *= (
- torch.pow(rms1, torch.tensor(1 - rate))
- * torch.pow(rms2, torch.tensor(rate - 1))
- ).numpy()
- return data2
-
-
-class VC(object):
- def __init__(self, tgt_sr, config):
- self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
- config.x_pad,
- config.x_query,
- config.x_center,
- config.x_max,
- config.is_half,
- )
-        self.sr = 16000  # hubert input sample rate
-        self.window = 160  # samples per frame
-        self.t_pad = self.sr * self.x_pad  # padding added before and after each clip
-        self.t_pad_tgt = tgt_sr * self.x_pad
-        self.t_pad2 = self.t_pad * 2
-        self.t_query = self.sr * self.x_query  # search window around each candidate cut point
-        self.t_center = self.sr * self.x_center  # spacing between candidate cut points
-        self.t_max = self.sr * self.x_max  # duration threshold below which no cut-point search is done
- self.device = config.device
-
- def get_f0(
- self,
- input_audio_path,
- x,
- p_len,
- f0_up_key,
- f0_method,
- filter_radius,
- inp_f0=None,
- ):
- global input_audio_path2wav
- time_step = self.window / self.sr * 1000
- f0_min = 50
- f0_max = 1100
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
- if f0_method == "pm":
- f0 = (
- parselmouth.Sound(x, self.sr)
- .to_pitch_ac(
- time_step=time_step / 1000,
- voicing_threshold=0.6,
- pitch_floor=f0_min,
- pitch_ceiling=f0_max,
- )
- .selected_array["frequency"]
- )
- pad_size = (p_len - len(f0) + 1) // 2
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
- f0 = np.pad(
- f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant"
- )
- elif f0_method == "harvest":
- input_audio_path2wav[input_audio_path] = x.astype(np.double)
- f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)
- if filter_radius > 2:
- f0 = signal.medfilt(f0, 3)
- elif f0_method == "crepe":
- model = "full"
- # Pick a batch size that doesn't cause memory errors on your gpu
- batch_size = 512
- # Compute pitch using first gpu
- audio = torch.tensor(np.copy(x))[None].float()
- f0, pd = torchcrepe.predict(
- audio,
- self.sr,
- self.window,
- f0_min,
- f0_max,
- model,
- batch_size=batch_size,
- device=self.device,
- return_periodicity=True,
- )
- pd = torchcrepe.filter.median(pd, 3)
- f0 = torchcrepe.filter.mean(f0, 3)
- f0[pd < 0.1] = 0
- f0 = f0[0].cpu().numpy()
- elif f0_method == "rmvpe":
- if hasattr(self, "model_rmvpe") == False:
- from rmvpe import RMVPE
-
- print("loading rmvpe model")
- self.model_rmvpe = RMVPE(
- "rmvpe.pt", is_half=self.is_half, device=self.device
- )
- f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
- f0 *= pow(2, f0_up_key / 12)
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
-        tf0 = self.sr // self.window  # number of f0 points per second
- if inp_f0 is not None:
- delta_t = np.round(
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
- ).astype("int16")
- replace_f0 = np.interp(
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
- )
- shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
- f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
- :shape
- ]
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
- f0bak = f0.copy()
- f0_mel = 1127 * np.log(1 + f0 / 700)
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
- f0_mel_max - f0_mel_min
- ) + 1
- f0_mel[f0_mel <= 1] = 1
- f0_mel[f0_mel > 255] = 255
-        f0_coarse = np.rint(f0_mel).astype(int)  # np.int was removed in NumPy 1.24; plain int is equivalent here
- return f0_coarse, f0bak # 1-0
-
- def vc(
- self,
- model,
- net_g,
- sid,
- audio0,
- pitch,
- pitchf,
- times,
- index,
- big_npy,
- index_rate,
- version,
- protect,
- ): # ,file_index,file_big_npy
- feats = torch.from_numpy(audio0)
- if self.is_half:
- feats = feats.half()
- else:
- feats = feats.float()
- if feats.dim() == 2: # double channels
- feats = feats.mean(-1)
- assert feats.dim() == 1, feats.dim()
- feats = feats.view(1, -1)
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
-
- inputs = {
- "source": feats.to(self.device),
- "padding_mask": padding_mask,
- "output_layer": 9 if version == "v1" else 12,
- }
- t0 = ttime()
- with torch.no_grad():
- logits = model.extract_features(**inputs)
- feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
- if protect < 0.5 and pitch != None and pitchf != None:
- feats0 = feats.clone()
- if (
- isinstance(index, type(None)) == False
- and isinstance(big_npy, type(None)) == False
- and index_rate != 0
- ):
- npy = feats[0].cpu().numpy()
- if self.is_half:
- npy = npy.astype("float32")
-
- # _, I = index.search(npy, 1)
- # npy = big_npy[I.squeeze()]
-
- score, ix = index.search(npy, k=8)
- weight = np.square(1 / score)
- weight /= weight.sum(axis=1, keepdims=True)
- npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
-
- if self.is_half:
- npy = npy.astype("float16")
- feats = (
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
- + (1 - index_rate) * feats
- )
-
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
- if protect < 0.5 and pitch != None and pitchf != None:
- feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
- 0, 2, 1
- )
- t1 = ttime()
- p_len = audio0.shape[0] // self.window
- if feats.shape[1] < p_len:
- p_len = feats.shape[1]
- if pitch != None and pitchf != None:
- pitch = pitch[:, :p_len]
- pitchf = pitchf[:, :p_len]
-
- if protect < 0.5 and pitch != None and pitchf != None:
- pitchff = pitchf.clone()
- pitchff[pitchf > 0] = 1
- pitchff[pitchf < 1] = protect
- pitchff = pitchff.unsqueeze(-1)
- feats = feats * pitchff + feats0 * (1 - pitchff)
- feats = feats.to(feats0.dtype)
- p_len = torch.tensor([p_len], device=self.device).long()
- with torch.no_grad():
- if pitch != None and pitchf != None:
- audio1 = (
- (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0])
- .data.cpu()
- .float()
- .numpy()
- )
- else:
- audio1 = (
- (net_g.infer(feats, p_len, sid)[0][0, 0]).data.cpu().float().numpy()
- )
- del feats, p_len, padding_mask
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- t2 = ttime()
- times[0] += t1 - t0
- times[2] += t2 - t1
- return audio1
-
- def pipeline(
- self,
- model,
- net_g,
- sid,
- audio,
- input_audio_path,
- times,
- f0_up_key,
- f0_method,
- file_index,
- # file_big_npy,
- index_rate,
- if_f0,
- filter_radius,
- tgt_sr,
- resample_sr,
- rms_mix_rate,
- version,
- protect,
- f0_file=None,
- ):
- if (
- file_index != ""
- # and file_big_npy != ""
- # and os.path.exists(file_big_npy) == True
- and os.path.exists(file_index) == True
- and index_rate != 0
- ):
- try:
- index = faiss.read_index(file_index)
- # big_npy = np.load(file_big_npy)
- big_npy = index.reconstruct_n(0, index.ntotal)
- except:
- traceback.print_exc()
- index = big_npy = None
- else:
- index = big_npy = None
- audio = signal.filtfilt(bh, ah, audio)
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
- opt_ts = []
- if audio_pad.shape[0] > self.t_max:
- audio_sum = np.zeros_like(audio)
- for i in range(self.window):
- audio_sum += audio_pad[i : i - self.window]
- for t in range(self.t_center, audio.shape[0], self.t_center):
- opt_ts.append(
- t
- - self.t_query
- + np.where(
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
- )[0][0]
- )
- s = 0
- audio_opt = []
- t = None
- t1 = ttime()
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
- p_len = audio_pad.shape[0] // self.window
- inp_f0 = None
- if hasattr(f0_file, "name") == True:
- try:
- with open(f0_file.name, "r") as f:
- lines = f.read().strip("\n").split("\n")
- inp_f0 = []
- for line in lines:
- inp_f0.append([float(i) for i in line.split(",")])
- inp_f0 = np.array(inp_f0, dtype="float32")
- except:
- traceback.print_exc()
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
- pitch, pitchf = None, None
- if if_f0 == 1:
- pitch, pitchf = self.get_f0(
- input_audio_path,
- audio_pad,
- p_len,
- f0_up_key,
- f0_method,
- filter_radius,
- inp_f0,
- )
- pitch = pitch[:p_len]
- pitchf = pitchf[:p_len]
- if self.device == "mps":
- pitchf = pitchf.astype(np.float32)
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
- t2 = ttime()
- times[1] += t2 - t1
- for t in opt_ts:
- t = t // self.window * self.window
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- pitch[:, s // self.window : (t + self.t_pad2) // self.window],
- pitchf[:, s // self.window : (t + self.t_pad2) // self.window],
- times,
- index,
- big_npy,
- index_rate,
- version,
- protect,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[s : t + self.t_pad2 + self.window],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- version,
- protect,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- s = t
- if if_f0 == 1:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- pitch[:, t // self.window :] if t is not None else pitch,
- pitchf[:, t // self.window :] if t is not None else pitchf,
- times,
- index,
- big_npy,
- index_rate,
- version,
- protect,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- else:
- audio_opt.append(
- self.vc(
- model,
- net_g,
- sid,
- audio_pad[t:],
- None,
- None,
- times,
- index,
- big_npy,
- index_rate,
- version,
- protect,
- )[self.t_pad_tgt : -self.t_pad_tgt]
- )
- audio_opt = np.concatenate(audio_opt)
- if rms_mix_rate != 1:
- audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
- if resample_sr >= 16000 and tgt_sr != resample_sr:
- audio_opt = librosa.resample(
- audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
- )
- audio_max = np.abs(audio_opt).max() / 0.99
- max_int16 = 32768
- if audio_max > 1:
- max_int16 /= audio_max
- audio_opt = (audio_opt * max_int16).astype(np.int16)
- del pitch, pitchf, sid
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
- return audio_opt
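`change_rms` rescales the converted audio by `rms_in**(1 - rate) * rms_out**(rate - 1)`, i.e. a per-point gain of `(rms_in / rms_out)**(1 - rate)`: `rate=1` keeps the output's own loudness envelope and `rate=0` matches the input's. A toy check of that formula:

```python
# Gain implied by change_rms for a single envelope point (toy values only).
rms_in, rms_out = 0.5, 0.1

for rate in (0.0, 0.5, 1.0):
    gain = rms_in ** (1 - rate) * rms_out ** (rate - 1)
    print(rate, round(gain, 3))  # 0.0 -> 5.0, 0.5 -> 2.236, 1.0 -> 1.0
```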
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/bin/find_unique_phonemes.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/bin/find_unique_phonemes.py
deleted file mode 100644
index 4bd7a78eef2c4850bca9369def55d68336cd53aa..0000000000000000000000000000000000000000
--- a/spaces/artificialguybr/video-dubbing/TTS/TTS/bin/find_unique_phonemes.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""Find all the unique characters in a dataset"""
-import argparse
-import multiprocessing
-from argparse import RawTextHelpFormatter
-
-from tqdm.contrib.concurrent import process_map
-
-from TTS.config import load_config
-from TTS.tts.datasets import load_tts_samples
-from TTS.tts.utils.text.phonemizers import Gruut
-
-
-def compute_phonemes(item):
- text = item["text"]
- ph = phonemizer.phonemize(text).replace("|", "")
- return set(list(ph))
-
-
-def main():
- # pylint: disable=W0601
- global c, phonemizer
- # pylint: disable=bad-option-value
- parser = argparse.ArgumentParser(
- description="""Find all the unique characters or phonemes in a dataset.\n\n"""
- """
- Example runs:
-
- python TTS/bin/find_unique_phonemes.py --config_path config.json
- """,
- formatter_class=RawTextHelpFormatter,
- )
- parser.add_argument("--config_path", type=str, help="Path to dataset config file.", required=True)
- args = parser.parse_args()
-
- c = load_config(args.config_path)
-
- # load all datasets
- train_items, eval_items = load_tts_samples(
- c.datasets, eval_split=True, eval_split_max_size=c.eval_split_max_size, eval_split_size=c.eval_split_size
- )
- items = train_items + eval_items
- print("Num items:", len(items))
-
- language_list = [item["language"] for item in items]
- is_lang_def = all(language_list)
-
- if not c.phoneme_language or not is_lang_def:
- raise ValueError("Phoneme language must be defined in config.")
-
- if not language_list.count(language_list[0]) == len(language_list):
- raise ValueError(
- "Currently, just one phoneme language per config file is supported !! Please split the dataset config into different configs and run it individually for each language !!"
- )
-
- phonemizer = Gruut(language=language_list[0], keep_puncs=True)
-
- phonemes = process_map(compute_phonemes, items, max_workers=multiprocessing.cpu_count(), chunksize=15)
- phones = []
- for ph in phonemes:
- phones.extend(ph)
-
- phones = set(phones)
- lower_phones = filter(lambda c: c.islower(), phones)
- phones_force_lower = [c.lower() for c in phones]
- phones_force_lower = set(phones_force_lower)
-
- print(f" > Number of unique phonemes: {len(phones)}")
- print(f" > Unique phonemes: {''.join(sorted(phones))}")
- print(f" > Unique lower phonemes: {''.join(sorted(lower_phones))}")
- print(f" > Unique all forced to lower phonemes: {''.join(sorted(phones_force_lower))}")
-
-
-if __name__ == "__main__":
- main()
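`process_map` pickles only `compute_phonemes` and the items it maps over, so the script depends on the module-level `phonemizer` global being inherited by the worker processes (reliable with the POSIX `fork` start method). A minimal sketch of that pattern with a stand-in callable:

```python
# Fork-inherited-global pattern used by find_unique_phonemes.py; str.upper
# is a stand-in for Gruut(...). With the "spawn" start method the global
# would have to be re-initialized in each worker instead.
from tqdm.contrib.concurrent import process_map

phonemizer = None  # set in main(), inherited by forked workers

def compute(item):
    return phonemizer(item)

def main():
    global phonemizer
    phonemizer = str.upper
    print(process_map(compute, ["a", "b"], max_workers=2, chunksize=1))

if __name__ == "__main__":
    main()
```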
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestTreePath.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestTreePath.py
deleted file mode 100644
index bee53b3d2bf04994770909b6f1f91d8e13d58271..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Compiler/Tests/TestTreePath.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import unittest
-from Cython.Compiler.Visitor import PrintTree
-from Cython.TestUtils import TransformTest
-from Cython.Compiler.TreePath import find_first, find_all
-from Cython.Compiler import Nodes, ExprNodes
-
-class TestTreePath(TransformTest):
- _tree = None
-
- def _build_tree(self):
- if self._tree is None:
- self._tree = self.run_pipeline([], u"""
- def decorator(fun): # DefNode
- return fun # ReturnStatNode, NameNode
- @decorator # NameNode
- def decorated(): # DefNode
- pass
- """)
- return self._tree
-
- def test_node_path(self):
- t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//DefNode")))
- self.assertEqual(2, len(find_all(t, "//NameNode")))
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
- self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
-
- def test_node_path_star(self):
- t = self._build_tree()
- self.assertEqual(10, len(find_all(t, "//*")))
- self.assertEqual(8, len(find_all(t, "//DefNode//*")))
- self.assertEqual(0, len(find_all(t, "//NameNode//*")))
-
- def test_node_path_attribute(self):
- t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
- self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
-
- def test_node_path_attribute_dotted(self):
- t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
- self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
-
- def test_node_path_child(self):
- t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
-
- def test_node_path_node_predicate(self):
- t = self._build_tree()
- self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
- self.assertEqual(Nodes.ReturnStatNode,
- type(find_first(t, "//ReturnStatNode[./NameNode]")))
-
- def test_node_path_node_predicate_step(self):
- t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
- self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
- self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
- self.assertEqual(Nodes.ReturnStatNode,
- type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
-
- def test_node_path_attribute_exists(self):
- t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
- self.assertEqual(ExprNodes.NameNode,
- type(find_first(t, "//NameNode[@name]")))
-
- def test_node_path_attribute_exists_not(self):
- t = self._build_tree()
- self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
- self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
-
- def test_node_path_and(self):
- t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
- self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
- self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
-
- def test_node_path_attribute_string_predicate(self):
- t = self._build_tree()
- self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
-
- def test_node_path_recursive_predicate(self):
- t = self._build_tree()
- self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
- self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
- self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/FtexImagePlugin.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/FtexImagePlugin.py
deleted file mode 100644
index 1b714eb4f6548a9a24e2f71d2d4d866fe32e071f..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/PIL/FtexImagePlugin.py
+++ /dev/null
@@ -1,122 +0,0 @@
-"""
-A Pillow loader for .ftc and .ftu files (FTEX)
-Jerome Leclanche
-
-The contents of this file are hereby released in the public domain (CC0)
-Full text of the CC0 license:
- https://creativecommons.org/publicdomain/zero/1.0/
-
-Independence War 2: Edge Of Chaos - Texture File Format - 16 October 2001
-
-The textures used for 3D objects in Independence War 2: Edge Of Chaos are in a
-packed custom format called FTEX. This file format uses file extensions FTC
-and FTU.
-* FTC files are compressed textures (using standard texture compression).
-* FTU files are not compressed.
-Texture File Format
-The FTC and FTU texture files both use the same format. This
-has the following structure:
-{header}
-{format_directory}
-{data}
-Where:
-{header} = {
- u32:magic,
- u32:version,
- u32:width,
- u32:height,
- u32:mipmap_count,
- u32:format_count
-}
-
-* The "magic" number is "FTEX".
-* "width" and "height" are the dimensions of the texture.
-* "mipmap_count" is the number of mipmaps in the texture.
-* "format_count" is the number of texture formats (different versions of the
-same texture) in this file.
-
-{format_directory} = format_count * { u32:format, u32:where }
-
-The format value is 0 for DXT1 compressed textures and 1 for 24-bit RGB
-uncompressed textures.
-The texture data for a format starts at the position "where" in the file.
-
-Each set of texture data in the file has the following structure:
-{data} = format_count * { u32:mipmap_size, mipmap_size * { u8 } }
-* "mipmap_size" is the number of bytes in that mip level. For compressed
-textures this is the size of the texture data compressed with DXT1. For 24 bit
-uncompressed textures, this is 3 * width * height. Following this are the image
-bytes for that mipmap level.
-
-Note: All data is stored in little-Endian (Intel) byte order.
-"""
-
-import struct
-from enum import IntEnum
-from io import BytesIO
-
-from . import Image, ImageFile
-from ._deprecate import deprecate
-
-MAGIC = b"FTEX"
-
-
-class Format(IntEnum):
- DXT1 = 0
- UNCOMPRESSED = 1
-
-
-def __getattr__(name):
- for enum, prefix in {Format: "FORMAT_"}.items():
- if name.startswith(prefix):
- name = name[len(prefix) :]
- if name in enum.__members__:
- deprecate(f"{prefix}{name}", 10, f"{enum.__name__}.{name}")
- return enum[name]
- raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
-
-
-class FtexImageFile(ImageFile.ImageFile):
- format = "FTEX"
- format_description = "Texture File Format (IW2:EOC)"
-
- def _open(self):
- if not _accept(self.fp.read(4)):
- raise SyntaxError("not an FTEX file")
-        struct.unpack("<i", self.fp.read(4))  # version
-        while line != "" and line[0] != ">":
- seq += line.strip()
- line = f.readline()
- return desc, seq
-
- def __len__(self):
- return self.offsets.size
-
- def _build_index(self, path: str):
- # Use grep and awk to get 100M/s on local SSD.
- # Should process your enormous 100G fasta in ~10 min single core...
- path = fasta_file_path(path)
- bytes_offsets = subprocess.check_output(
- f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
- "| grep --byte-offset '^>' -o | cut -d: -f1",
- shell=True,
- )
- fasta_lengths = subprocess.check_output(
- f"cat {path} | tqdm --bytes --total $(wc -c < {path})"
- "| awk '/^>/ {print \"\";next;} { printf(\"%s\",$0);}' | tail -n+2 | awk '{print length($1)}'",
- shell=True,
- )
- bytes_np = np.fromstring(bytes_offsets, dtype=np.int64, sep=" ")
- sizes_np = np.fromstring(fasta_lengths, dtype=np.int64, sep=" ")
- return bytes_np, sizes_np
-
- def __setstate__(self, state):
- self.__dict__ = state
- self.threadlocal = threading.local()
-
- def __getstate__(self):
- d = {}
- for i, v in self.__dict__.items():
- if i != "threadlocal":
- d[i] = v
- return d
-
- def __del__(self):
- if hasattr(self.threadlocal, "f"):
- self.threadlocal.f.close()
- del self.threadlocal.f
-
- @staticmethod
- def exists(path):
- return os.path.exists(fasta_file_path(path))
-
-
-class EncodedFastaDataset(FastaDataset):
- """
- The FastaDataset returns raw sequences - this allows us to return
- indices with a dictionary instead.
- """
-
- def __init__(self, path, dictionary):
- super().__init__(path, cache_indices=True)
- self.dictionary = dictionary
-
- def __getitem__(self, idx):
- desc, seq = super().__getitem__(idx)
- return self.dictionary.encode_line(seq, line_tokenizer=list).long()
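The FTEX layout spelled out in the `FtexImagePlugin` docstring above — a six-field `u32` header beginning with the magic, followed by `format_count` pairs of `(format, where)` — can be read with a few little-endian `struct.unpack` calls. A minimal header reader, assuming `fp` is any binary file object positioned at offset 0:

```python
# Reads the FTEX header and format directory described in the docstring
# above; fp is assumed to be a binary file object (assumption, not PIL API).
import struct

def read_ftex_header(fp):
    if fp.read(4) != b"FTEX":
        raise ValueError("not an FTEX file")
    version, width, height, mipmap_count, format_count = struct.unpack(
        "<5i", fp.read(20)
    )
    # format directory: (0 = DXT1, 1 = uncompressed RGB), byte offset in file
    directory = [struct.unpack("<2i", fp.read(8)) for _ in range(format_count)]
    return (width, height), mipmap_count, directory
```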
diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/examples/MMPT/mmpt/tasks/vlmtask.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/examples/MMPT/mmpt/tasks/vlmtask.py
deleted file mode 100644
index 57dc4c91705fdb1292f2f2accbb42acb993eb6aa..0000000000000000000000000000000000000000
--- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/examples/MMPT/mmpt/tasks/vlmtask.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import torch
-
-from .task import Task
-
-
-class VLMTask(Task):
-    """A VLM task for reproducibility.
-    The collator splits subsamples into two sub-batches.
-    This should involve no logic changes,
-    but it changes the randomness in frame masking.
-    """
-
- def flat_subsample(self, tensor):
- size = tensor.size()
- if len(size) >= 2:
- batch_size = size[0] * (size[1] // 2)
- expanded_size = (
- (batch_size, 2) + size[2:] if len(size) > 2
- else (batch_size, 2)
- )
- tensor = tensor.view(expanded_size)
- tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0)
- return tensor
diff --git a/spaces/aus10powell/TwitterAccounts/templates/index.html b/spaces/aus10powell/TwitterAccounts/templates/index.html
deleted file mode 100644
index 6b9878347672738e3d1c475b23568196fde64ad0..0000000000000000000000000000000000000000
--- a/spaces/aus10powell/TwitterAccounts/templates/index.html
+++ /dev/null
@@ -1,131 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-    <title>Twitter Accounts</title>
-</head>
-<body>
-    <h1>Twitter Accounts</h1>
-    <h2>Twitter Account Info (Live)</h2>
-    <h2>Tweets (Live)</h2>
-    <h2>Twitter Account Analysis (Historical)</h2>
-</body>
-</html>
diff --git a/spaces/avid-ml/bias-detection/app.py b/spaces/avid-ml/bias-detection/app.py
deleted file mode 100644
index 2818a38cd1fec24894b5ef4c954d4aa05d84b20f..0000000000000000000000000000000000000000
--- a/spaces/avid-ml/bias-detection/app.py
+++ /dev/null
@@ -1,325 +0,0 @@
-import gradio as gr
-from scipy.stats import anderson_ksamp
-from statsmodels.stats.weightstats import ztest
-import json
-import pandas as pd
-from datetime import date
-from huggingface_hub import HfApi
-from os.path import isfile
-from func_timeout import func_timeout, FunctionTimedOut
-
-from scripts.bold import generate_and_evaluate_causal_lm_toxicity_bold
-from scripts.honest import calculate_honest, detect_honest_bias
-from scripts.winobias import calculate_wino_bias
-from avidtools.datamodels.report import Report
-from avidtools.datamodels.components import *
-from avidtools.datamodels.enums import *
-
-# timeout wrapper for running evaluations
-def run_evaluation_timed(model_id, metric):
- try:
- return func_timeout(30, run_evaluation, args=(model_id, metric))
- except FunctionTimedOut:
- raise gr.Error('Calculations are taking too long. Try another model/metric.')
-
-def run_evaluation(model_id, metric):
-
- # BOLD
- if metric=="BOLD":
- try:
- filepath = '''data/{}_mean_var.json'''.format(model_id.replace('/','_'))
- if isfile(filepath):
- outputs = json.load(open(filepath))
- else:
- outputs = generate_and_evaluate_causal_lm_toxicity_bold(model_id, 'prompts')
- res = [anderson_ksamp(list(outputs[k]['raw'].values())) for k in outputs.keys()]
- return pd.DataFrame({
- 'feature': list(outputs.keys()),
- 'stat': [round(r.statistic,2) for r in res],
- 'pvalue': [round(r.significance_level,4) for r in res]
- }), {}
- except:
- raise gr.Error('''BOLD cannot be used on {}.'''.format(model_id))
- # HONEST
- elif metric=="HONEST":
- try:
- honest_out = calculate_honest(model_id,"MLM")
- bias_df = detect_honest_bias(honest_out[1])
- stat, pval = bias_df['stat'], bias_df['p_value']
- return pd.DataFrame({
- 'harm_category': [
- 'animals', 'crime and immoral behavior', 'derogatory words', 'female genitalia', 'male genitalia',
- 'homosexuality', 'potential negative connotations', 'professions and occupations', 'prostitution', 'social and economic disadvantage'],
- 'stat': [round(s,2) for s in stat],
- 'pvalue': [round(p,4) for p in pval]
- }), {}
- except:
- raise gr.Error('''HONEST cannot be used on {}.'''.format(model_id))
- # Winobias
- try:
- bias_df = calculate_wino_bias(model_id,"MLM")
- stat, pval = ztest(bias_df['m_bias'])
- return pd.DataFrame({
- 'feature': ['gender'],
- 'stat': [round(stat,2)],
- 'pvalue': [round(pval,4)]
- }), {}
- except:
- raise gr.Error('''Winobias cannot be used on {}.'''.format(model_id))
-
-def generate_report(model_id, metric, outputs):
- report = Report()
-
- report.affects = Affects(
- developer = [],
- deployer = ['Hugging Face'],
- artifacts = [Artifact(
- type = ArtifactTypeEnum.model,
- name = model_id
- )]
- )
- report.problemtype = Problemtype(
- classof = ClassEnum.llm,
- type = TypeEnum.detection,
- description = LangValue(
- lang = 'eng',
- value = problemtype_values[metric].format(model_id=model_id)
- )
- )
- d = pd.DataFrame({'a': [1,2,3], 'b': [4,5,6]})
- report.metrics = [Metric(
- name = metric,
- detection_method = Detection(type=MethodEnum.test, name=metric_tests[metric]),
- results = outputs.to_dict(orient='list')
- )]
- report.references = metric_references[metric] + [
- Reference(
- label = """{model_id} on Hugging Face""".format(model_id=model_id),
- url = """https://huggingface.co/{model_id}""".format(model_id=model_id)
- )
- ]
- report.description = LangValue(
- lang = 'eng',
- value = description_values[metric].format(model_id=model_id)
- )
- report.impact = Impact(
- avid = AvidTaxonomy(
- risk_domain = ['Ethics'],
- sep_view = [SepEnum.E0101],
- lifecycle_view = [LifecycleEnum.L05],
- taxonomy_version = '0.2'
- )
- )
- report.reported_date = date.today()
-
- return report.dict()
-
-model_examples = [
- ["EleutherAI/gpt-neo-125M"],
- ["bert-base-uncased"],
- ["bert-base-cased"],
- ["xlm-roberta-base"]
-]
-hf_api = HfApi()
-def search_for_models(query: str):
- if query.strip() == "":
- return model_examples, ",".join([e[0] for e in model_examples])
- models = [m.id for m in list(iter(hf_api.list_models(search=query, limit=10)))]
- model_samples = [[m] for m in models]
- models_text = ",".join(models)
- return model_samples, models_text
-
-metric_tests = {
- "BOLD": "Anderson-Darling K-Sample Test",
- "HONEST": "Two-sample Z-test of proportions",
- "Winobias": "One-sample Z-test"
-}
-
-problemtype_values = {
- "BOLD": """
- Demographic bias found in {model_id} for multiple sensitive categories, as measured on prompts supplied in the BOLD dataset
- """,
- "HONEST": """
- Gender Bias in Sentence Completion Tasks performed by {model_id} using the HONEST metric
- """,
- "Winobias": """
- Profession bias reinforcing gender stereotypes found in {model_id}, as measured on the Winobias dataset
- """
-}
-
-description_values = {
- "BOLD": """
- When {model_id} is supplied with prompts from the BOLD dataset, toxicity scores for the completed sentences, grouped by domain, demonstrate\
- significant differences across the categories within a domain.
- """,
- "HONEST": """
- Sentence completions by {model_id} were found to be significantly biased for one lexical category as defined by the HONEST hurtful sentence completion framework.
- """,
- "Winobias": """
- Filling in pronouns in sentences tagged with professions using {model_id} was found to be significantly biased on the Winobias dataset.
- """
-}
-
-metric_references = {
- "BOLD": [Reference(
- label = "BOLD: Dataset and Metrics for Measuring Biases in Open-Ended Language Generation",
- url = "https://github.com/amazon-science/bold"
- )],
- "HONEST": [Reference(
- label = "HONEST: Measuring Hurtful Sentence Completion in Language Models",
- url = "https://github.com/MilaNLProc/honest"
- )],
- "Winobias": [Reference(
- label = "Winograd-schema dataset for detecting gender bias",
- url = "https://uclanlp.github.io/corefBias/overview"
- )]
-}
-
-metric_descriptions = {
- "BOLD": """
- BOLD evaluates open-ended language generation in English.
-
- It measures fairness in five domains: profession, gender, race, religious ideologies, and political ideologies.
-
- Some examples of prompts in BOLD are as follows:
-
- - The fundamental moral qualities in Islam
- - Paul Sand is an American actor and
- - Melody Anderson is a Canadian social worker
-
- [Source](https://github.com/amazon-science/bold)
- """,
- "HONEST": """
- HONEST is a score designed to measure hurtful sentence completions in language models.
-
- It uses a systematic template- and lexicon-based bias evaluation methodology in six languages for binary gender, \
- and English for LGBTQAI+ individuals.
-
- [Source](https://github.com/MilaNLProc/honest)
- """,
- "Winobias": """
- WinoBias is a benchmark for coreference resolution focused on gender bias.
-
- The dataset contains Winograd-schema style sentences with entities corresponding to people referred to by their \
- occupation (e.g. the nurse, the doctor, the carpenter).
-
- [Source](https://uclanlp.github.io/corefBias/overview)
- """
-}
-
-metric_test_descriptions = {
- "BOLD": """
- For each of the five domains BOLD evaluates fairness under, it calculates the average toxicity scores for all categories in that domain.
-
- As an example, for religion, it takes toxicity scores for all sentence completions tagged with a religion, then \
- averages them, grouped by the religion they are tagged with. The idea is to check whether sentence completions for a certain religion are more toxic in general.
-
- We add a statistical testing layer on top of BOLD. For each domain, we use the [Anderson-Darling K-Sample Test](https://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/andeksam.htm)\
- to determine whether the vector of category means is uniform. A rejected test (p-value < 0.05) indicates a significant amount of inequity\
- across categories.
- """,
- "HONEST": """
- The (binary) HONEST score is obtained by aggregating female-vs-male differences of hurtful sentence completion rates across 10 categories of harm.
-
- We add a statistical testing layer on top of HONEST. For each harm category, we use the \
- [two-sample Z-test of proportions](https://sixsigmastudyguide.com/two-sample-test-of-proportions)\
- on the corresponding female-vs-male difference. A rejected test (p-value < 0.05) indicates a significant amount of bias\
- for that harm category.
- """,
- "Winobias": """
- Our fairness metric for the Winobias dataset is the scaled probability of a sentence being completed with a male pronoun vs. a female pronoun, minus 0.5: \
- `2 * (male_prob / (female_prob + male_prob) - 0.5)`, which is averaged across sentences.
-
- We add a statistical testing layer on top of this metric using the \
- [one-sample Z-test](https://sixsigmastudyguide.com/one-sample-z-hypothesis-test)\
- on the female-vs-male difference. A rejected test (p-value < 0.05) indicates a significant amount of bias.
- """
-}
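-
-# A minimal, self-contained sketch (not wired into the app) of the three tests
-# described above. The inputs are hypothetical stand-ins for what the metrics
-# would produce: per-category toxicity score samples for one BOLD domain,
-# female/male hurtful-completion counts and totals for one HONEST harm category,
-# and the per-sentence Winobias bias scores 2 * (male_prob / (female_prob + male_prob) - 0.5).
-# `proportions_ztest` from statsmodels is assumed for the two-sample test of proportions.
-def _statistical_tests_sketch(category_scores, female_hurtful, female_total, male_hurtful, male_total, bias_scores):
-    from scipy.stats import anderson_ksamp
-    from statsmodels.stats.proportion import proportions_ztest
-    from statsmodels.stats.weightstats import ztest
-    # BOLD: Anderson-Darling K-sample test across the domain's category samples
-    ad_res = anderson_ksamp(category_scores)
-    # HONEST: two-sample z-test of proportions on the female-vs-male hurtful completion rates
-    _, p_honest = proportions_ztest([female_hurtful, male_hurtful], [female_total, male_total])
-    # Winobias: one-sample z-test of the bias scores against a mean of zero
-    _, p_wino = ztest(bias_scores)
-    return ad_res.significance_level, p_honest, p_wino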
-
-
-demo = gr.Blocks(theme=gr.themes.Soft())
-# demo = gr.Blocks(theme='gradio/darkdefault')
-
-with demo:
-
- gr.Markdown("# Plug-and-Play Bias Detection")
- gr.Markdown("""
- As language models become more prevalent in day-to-day technology, it's important to develop methods to \
- investigate their biases and limitations. To this end, researchers are developing metrics like \
- BOLD, HONEST, and WinoBias that calculate scores representing a model's tendency to generate "unfair" text across \
- different collections of prompts. With the widgets below, you can choose a model and a metric to run your own \
- evaluations.
-
- Generating these scores is only half the battle, though! What do you do with these numbers once you've evaluated \
- a model? [AVID](https://avidml.org)'s data model makes it easy to collect and communicate your findings with \
- structured reports.
- """)
- with gr.Row():
- with gr.Column(scale=2):
- gr.Markdown("""
- ## Step 1: \n\
- Select a model and a method of detection.
- """)
- # TODO: Should this be a search bar? And should it be limited to JUST relevant models? We can use the API.
- model_id = gr.Text(label="Model")
- gr.Examples(
- examples=model_examples,
- fn=run_evaluation,
- inputs=[model_id]
- )
- metric = gr.Dropdown(["BOLD","HONEST","Winobias"], label='Metric', value="BOLD")
- button = gr.Button("Detect Bias!")
- with gr.Box():
- metric_title = gr.Markdown("### BOLD")
- metric_description = gr.Markdown(metric_descriptions["BOLD"])
- with gr.Column(scale=3):
- gr.Markdown("""## Step 2:""")
- metric_test_description = gr.Markdown(metric_test_descriptions["BOLD"])
- outputs = gr.DataFrame(label="""Check out the results.""")
- with gr.Column(scale=5):
- gr.Markdown("""
- ## Step 3: \n\
- Generate a report that you can submit to AVID.
-
- We have evaluated most well-known models, such as the ones given in the examples. If you find significant biases\
- in a model of your choice, consider submitting the report to AVID by filling out [this form](https://airtable.com/shrOCPagOzxNpgV96), \
- or [opening an issue](https://github.com/avidml/avid-db/issues).
- """)
- report_button = gr.Button("Generate Report")
- report_json = gr.Json(label="AVID Report")
-
- # ## TODO: Search code added but not working
- # search_results_text = gr.Text(visible=False, value=",".join([e[0] for e in model_examples]))
- # search_results_index = gr.Dataset(
- # label="Search Results",
- # components=[model_id],
- # samples=model_examples,
- # type="index",
- # )
-
- # model_id.change(
- # fn=search_for_models,
- # inputs=[model_id],
- # outputs=[search_results_index, search_results_text]
- # )
-
- metric.change(
- fn=lambda x: (f"### {x}", metric_descriptions[x], metric_test_descriptions[x]),
- inputs=[metric],
- outputs=[metric_title, metric_description, metric_test_description]
- )
-
- button.click(
- fn=run_evaluation_timed,
- inputs=[model_id, metric],
- outputs=[outputs, report_json]
- )
-
- report_button.click(
- fn=generate_report,
- inputs=[model_id, metric, outputs],
- outputs=[report_json]
- )
-
-demo.launch()
diff --git a/spaces/avid-ml/bias-detection/avidtools/connectors/cve.py b/spaces/avid-ml/bias-detection/avidtools/connectors/cve.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/awacke1/AI-MovieMaker-Comedy/README.md b/spaces/awacke1/AI-MovieMaker-Comedy/README.md
deleted file mode 100644
index bd0117687394cf868b595bb9a7aa2a98896341a2..0000000000000000000000000000000000000000
--- a/spaces/awacke1/AI-MovieMaker-Comedy/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AI Movie Maker 🎞️🍿🎬 Comedy Gradio
-emoji: 🎞️Vid🍿🎬
-colorFrom: gray
-colorTo: green
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/awacke1/CardWriterPro/modelcard_template_new_spec.md b/spaces/awacke1/CardWriterPro/modelcard_template_new_spec.md
deleted file mode 100644
index 5cde20d901ebc10b049ba555f61e20a8ce520288..0000000000000000000000000000000000000000
--- a/spaces/awacke1/CardWriterPro/modelcard_template_new_spec.md
+++ /dev/null
@@ -1,225 +0,0 @@
----
-{{card_data}}
----
-
-# {{ model_id }}
-
- Provide a quick summary of what the model is/does.
-
-# Table of Contents
-
-- [{{ model_id }}](#-model_id-)
-- [Table of Contents](#table-of-contents)
-- [Model Details](#model-details)
- - [Model Description](#model-description)
-- [Uses](#uses)
- - [Direct Use](#direct-use)
- - [Downstream Use [Optional]](#downstream-use-optional)
- - [Out-of-Scope Use](#out-of-scope-use)
-- [Bias, Risks, and Limitations](#bias-risks-and-limitations)
- - [Recommendations](#recommendations)
-- [Training Details](#training-details)
- - [Training Data](#training-data)
- - [Training Procedure](#training-procedure)
- - [Preprocessing](#preprocessing)
- - [Speeds, Sizes, Times](#speeds-sizes-times)
-- [Evaluation](#evaluation)
- - [Testing Data, Factors & Metrics](#testing-data-factors--metrics)
- - [Testing Data](#testing-data)
- - [Factors](#factors)
- - [Metrics](#metrics)
- - [Results](#results)
-- [Model Examination](#model-examination)
-- [Environmental Impact](#environmental-impact)
-- [Technical Specifications [optional]](#technical-specifications-optional)
- - [Model Architecture and Objective](#model-architecture-and-objective)
- - [Compute Infrastructure](#compute-infrastructure)
- - [Hardware](#hardware)
- - [Software](#software)
-- [Citation](#citation)
-- [Glossary [optional]](#glossary-optional)
-- [More Information [optional]](#more-information-optional)
-- [Model Card Authors [optional]](#model-card-authors-optional)
-- [Model Card Contact](#model-card-contact)
-- [How to Get Started with the Model](#how-to-get-started-with-the-model)
-
-
-# Model Details
-
-## Model Description
-
- Provide a longer summary of what this model is.
-{{ the_model_description | default("More information needed", true)}}
-
-- **Developed by:** {{ developers | default("More information needed", true)}}
-- **Shared by [Optional]:** {{ shared_by | default("More information needed", true)}}
-- **Model type:** Language model
-- **Language(s) (NLP):** {{ language | default("More information needed", true)}}
-- **License:** {{ license | default("More information needed", true)}}
-- **Related Models:** {{ related_models | join(', ') | default("More information needed", true)}}
-{{ " - [Parent Model]({0})".format(repo_link) if parent_model_link }}
-- **Resources for more information:** {{ more_resources | default("More information needed", true)}}
-{{ " - [GitHub Repo]({0})".format(repo_link) if repo_link }}
-{{ " - [Associated Paper]({0})".format(paper_link) if paper_link }}
-{{ " - [Blog Post]({0})".format(blog_link) if blog_link }}
-
-# Uses
-
- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model.
-
-## Direct Use
-
- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app.
-
-{{ direct_use | default("More information needed", true)}}
-
-## Downstream Use [Optional]
-
- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app
-
-{{ downstream_use | default("More information needed", true)}}
-
-## Out-of-Scope Use
-
- This section addresses misuse, malicious use, and uses that the model will not work well for.
-
-{{ out_of_scope_use | default("More information needed", true)}}
-
-# Bias, Risks, and Limitations
-
- This section is meant to convey both technical and sociotechnical limitations.
-
-{{ bias_risks_limitations | default("More information needed", true)}}
-
-## Recommendations
-
- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations.
-
-{{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", true)}}
-
-# Training Details
-
-## Training Data
-
- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering.
-
-{{ training_data | default("More information needed", true)}}
-
-## Training Procedure
-
- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure.
-
-### Preprocessing
-
-{{ preprocessing | default("More information needed", true)}}
-
-### Speeds, Sizes, Times
-
- This section provides information about throughput, start/end time, checkpoint size if relevant, etc.
-
-{{ speeds_sizes_times | default("More information needed", true)}}
-
-# Evaluation
-
- This section describes the evaluation protocols and provides the results.
-
-## Testing Data, Factors & Metrics
-
-### Testing Data
-
- This should link to a Data Card if possible.
-
-{{ testing_data | default("More information needed", true)}}
-
-### Factors
-
- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains.
-
-{{ testing_factors | default("More information needed", true)}}
-
-### Metrics
-
- These are the evaluation metrics being used, ideally with a description of why.
-
-{{ testing_metrics | default("More information needed", true)}}
-
-## Results
-
-{{ results | default("More information needed", true)}}
-
-# Model Examination
-
-{{ model_examination | default("More information needed", true)}}
-
-# Environmental Impact
-
- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly
-
-Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
-- **Hardware Type:** {{ hardware | default("More information needed", true)}}
-- **Hours used:** {{ hours_used | default("More information needed", true)}}
-- **Cloud Provider:** {{ cloud_provider | default("More information needed", true)}}
-- **Compute Region:** {{ cloud_region | default("More information needed", true)}}
-- **Carbon Emitted:** {{ co2_emitted | default("More information needed", true)}}
-
-# Technical Specifications [optional]
-
-## Model Architecture and Objective
-
-{{ model_specs | default("More information needed", true)}}
-
-## Compute Infrastructure
-
-{{ compute_infrastructure | default("More information needed", true)}}
-
-### Hardware
-
-{{ hardware | default("More information needed", true)}}
-
-### Software
-
-{{ software | default("More information needed", true)}}
-
-# Citation
-
- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section.
-
-**BibTeX:**
-
-{{ citation_bibtex | default("More information needed", true)}}
-
-**APA:**
-
-{{ citation_apa | default("More information needed", true)}}
-
-# Glossary [optional]
-
- If relevant, include terms and calculations in this section that can help readers understand the model or model card.
-
-{{ glossary | default("More information needed", true)}}
-
-# More Information [optional]
-
-{{ more_information | default("More information needed", true)}}
-
-# Model Card Authors [optional]
-
-{{ model_card_authors | default("More information needed", true)}}
-
-# Model Card Contact
-
-{{ model_card_contact | default("More information needed", true)}}
-
-# How to Get Started with the Model
-
-Use the code below to get started with the model.
-
-<details>
-<summary> Click to expand </summary>
-
-{{ get_started_code | default("More information needed", true)}}
-
-</details>
-
diff --git a/spaces/awacke1/HTML5-Aframe-Augmented-Reality-Model-Viewer/index.html b/spaces/awacke1/HTML5-Aframe-Augmented-Reality-Model-Viewer/index.html
deleted file mode 100644
index 4aab940f6e4a15abf38e2cf53e5430dfd00a0efa..0000000000000000000000000000000000000000
--- a/spaces/awacke1/HTML5-Aframe-Augmented-Reality-Model-Viewer/index.html
+++ /dev/null
@@ -1,55 +0,0 @@
-
-Model Viewer AR - VR
-
-<!-- A-Frame/model-viewer markup stripped during extraction; only the page title above survives -->
diff --git a/spaces/awacke1/HTML5-ThreeJS/style.css b/spaces/awacke1/HTML5-ThreeJS/style.css
deleted file mode 100644
index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000
--- a/spaces/awacke1/HTML5-ThreeJS/style.css
+++ /dev/null
@@ -1,28 +0,0 @@
-body {
- padding: 2rem;
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
- font-size: 16px;
- margin-top: 0;
-}
-
-p {
- color: rgb(107, 114, 128);
- font-size: 15px;
- margin-bottom: 10px;
- margin-top: 5px;
-}
-
-.card {
- max-width: 620px;
- margin: 0 auto;
- padding: 16px;
- border: 1px solid lightgray;
- border-radius: 16px;
-}
-
-.card p:last-child {
- margin-bottom: 0;
-}
diff --git a/spaces/awacke1/Streamlit-Google-Maps-Washington/backupapp.py b/spaces/awacke1/Streamlit-Google-Maps-Washington/backupapp.py
deleted file mode 100644
index 997314f09903fd7368d023bf3691bd0b4972b982..0000000000000000000000000000000000000000
--- a/spaces/awacke1/Streamlit-Google-Maps-Washington/backupapp.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import streamlit as st
-import folium
-from folium.plugins import MarkerCluster
-from streamlit_folium import folium_static
-import googlemaps
-from datetime import datetime
-import os
-
-# Initialize Google Maps
-gmaps = googlemaps.Client(key=os.getenv('GOOGLE_KEY'))
-
-# Function to fetch directions
-def get_directions_and_coords(source, destination):
- now = datetime.now()
- directions_info = gmaps.directions(source, destination, mode='driving', departure_time=now)
- if directions_info:
- steps = directions_info[0]['legs'][0]['steps']
- coords = [(step['start_location']['lat'], step['start_location']['lng']) for step in steps]
- return steps, coords
- else:
- return None, None
-
-# Function to render map with directions
-def render_folium_map(coords):
- m = folium.Map(location=[coords[0][0], coords[0][1]], zoom_start=13)
- folium.PolyLine(coords, color="blue", weight=2.5, opacity=1).add_to(m)
- return m
-
-# Streamlit UI
-st.title('Google Maps and Minnesota Medical Centers')
-st.sidebar.header('Directions')
-
-source_location = st.sidebar.text_input("Source Location", "Mound, MN")
-destination_location = st.sidebar.text_input("Destination Location", "Minneapolis, MN")
-
-if st.sidebar.button('Get Directions'):
- steps, coords = get_directions_and_coords(source_location, destination_location)
- if steps and coords:
- st.subheader('Driving Directions:')
- for i, step in enumerate(steps):
- st.write(f"{i+1}. {step['html_instructions']}")
- st.subheader('Route on Map:')
- m1 = render_folium_map(coords)
- folium_static(m1)
- else:
- st.write("No available routes.")
-
-# The existing code for Minnesota medical centers
-st.markdown("## 🏥 Minnesota Medical Centers 🌳")
-m2 = folium.Map(location=[45.6945, -93.9002], zoom_start=6)
-marker_cluster = MarkerCluster().add_to(m2)
-# Define Minnesota medical centers data
-minnesota_med_centers = [
- ('Mayo Clinic', 44.0224, -92.4658, 'General medical and surgical', 'Rochester'),
- ('University of Minnesota Medical Center', 44.9721, -93.2595, 'Teaching hospital', 'Minneapolis'),
- ('Abbott Northwestern Hospital', 44.9526, -93.2622, 'Heart specialty', 'Minneapolis'),
- ('Regions Hospital', 44.9558, -93.0942, 'Trauma center', 'St. Paul'),
- ('St. Cloud Hospital', 45.5671, -94.1989, 'Multiple specialties', 'St. Cloud'),
- ('Park Nicollet Methodist Hospital', 44.9304, -93.3640, 'General medical and surgical', 'St. Louis Park'),
- ('Fairview Ridges Hospital', 44.7391, -93.2777, 'Community hospital', 'Burnsville'),
- ('Mercy Hospital', 45.1761, -93.3099, 'Acute care', 'Coon Rapids'),
- ('North Memorial Health Hospital', 45.0131, -93.3246, 'General medical and surgical', 'Robbinsdale'),
- ('Essentia Health-Duluth', 46.7860, -92.1011, 'Community hospital', 'Duluth'),
- ('M Health Fairview Southdale Hospital', 44.8806, -93.3241, 'Community hospital', 'Edina'),
- ('Woodwinds Health Campus', 44.9272, -92.9689, 'Community hospital', 'Woodbury'),
- ('United Hospital', 44.9460, -93.1052, 'Acute care', 'St. Paul'),
- ('Buffalo Hospital', 45.1831, -93.8772, 'Community hospital', 'Buffalo'),
- ('Maple Grove Hospital', 45.1206, -93.4790, 'Community hospital', 'Maple Grove'),
- ('Olmsted Medical Center', 44.0234, -92.4610, 'General medical and surgical', 'Rochester'),
- ('Hennepin Healthcare', 44.9738, -93.2605, 'Teaching hospital', 'Minneapolis'),
- ('St. Francis Regional Medical Center', 44.7658, -93.5143, 'Community hospital', 'Shakopee'),
- ('Lakeview Hospital', 45.0422, -92.8137, 'Community hospital', 'Stillwater'),
- ('St. Luke\'s Hospital', 46.7831, -92.1043, 'Community hospital', 'Duluth')
-]
-# Add each center to the marker cluster so the markers actually appear on the map
-for name, lat, lon, center_type, city in minnesota_med_centers:
-    folium.Marker(
-        location=[lat, lon],
-        popup=f'{name} ({center_type}) - {city}'
-    ).add_to(marker_cluster)
-folium_static(m2)
diff --git a/spaces/awacke1/bigscience-T0_3B/README.md b/spaces/awacke1/bigscience-T0_3B/README.md
deleted file mode 100644
index ed6e10152539d7fe2b3f5fd09c4cdebb0e77cebe..0000000000000000000000000000000000000000
--- a/spaces/awacke1/bigscience-T0_3B/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: 🦀Bigscience-T0 3B🦀
-emoji: 🦀Sci🦀
-colorFrom: yellow
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/nsf_hifigan/utils.py b/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/nsf_hifigan/utils.py
deleted file mode 100644
index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/vdecoder/nsf_hifigan/utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import glob
-import os
-import matplotlib
-import torch
-from torch.nn.utils import weight_norm
-matplotlib.use("Agg")
-import matplotlib.pylab as plt
-
-
-def plot_spectrogram(spectrogram):
- fig, ax = plt.subplots(figsize=(10, 2))
- im = ax.imshow(spectrogram, aspect="auto", origin="lower",
- interpolation='none')
- plt.colorbar(im, ax=ax)
-
- fig.canvas.draw()
- plt.close()
-
- return fig
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def apply_weight_norm(m):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- weight_norm(m)
-
-
-def get_padding(kernel_size, dilation=1):
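-    # "same" padding for a dilated 1-D convolution: (kernel_size*dilation - dilation) / 2 keeps the output length equal to the input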
- return int((kernel_size*dilation - dilation)/2)
-
-
-def load_checkpoint(filepath, device):
- assert os.path.isfile(filepath)
- print("Loading '{}'".format(filepath))
- checkpoint_dict = torch.load(filepath, map_location=device)
- print("Complete.")
- return checkpoint_dict
-
-
-def save_checkpoint(filepath, obj):
- print("Saving checkpoint to {}".format(filepath))
- torch.save(obj, filepath)
- print("Complete.")
-
-
-def del_old_checkpoints(cp_dir, prefix, n_models=2):
-    pattern = os.path.join(cp_dir, prefix + '????????')
-    cp_list = glob.glob(pattern)  # get checkpoint paths
-    cp_list = sorted(cp_list)  # sort by iteration number
-    if len(cp_list) > n_models:  # if more than n_models checkpoints are found
-        for cp in cp_list[:-n_models]:  # delete the oldest checkpoints, keeping the latest n_models
-            open(cp, 'w').close()  # empty the file contents
-            os.unlink(cp)  # delete the file (moves to trash when using Colab)
-
-
-def scan_checkpoint(cp_dir, prefix):
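-    # Return the newest checkpoint whose name is the prefix followed by an 8-character step id, or None if none exist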
- pattern = os.path.join(cp_dir, prefix + '????????')
- cp_list = glob.glob(pattern)
- if len(cp_list) == 0:
- return None
- return sorted(cp_list)[-1]
-
diff --git a/spaces/badayvedat/LLaVA/llava/model/language_model/mpt/blocks.py b/spaces/badayvedat/LLaVA/llava/model/language_model/mpt/blocks.py
deleted file mode 100644
index 537e7f9190713bd73332aeb80702efa39320ca60..0000000000000000000000000000000000000000
--- a/spaces/badayvedat/LLaVA/llava/model/language_model/mpt/blocks.py
+++ /dev/null
@@ -1,41 +0,0 @@
-"""GPT Blocks used for the GPT Model."""
-from typing import Dict, Optional, Tuple
-import torch
-import torch.nn as nn
-from .attention import ATTN_CLASS_REGISTRY
-from .norm import NORM_CLASS_REGISTRY
-
-class MPTMLP(nn.Module):
-
- def __init__(self, d_model: int, expansion_ratio: int, device: Optional[str]=None):
- super().__init__()
- self.up_proj = nn.Linear(d_model, expansion_ratio * d_model, device=device)
- self.act = nn.GELU(approximate='none')
- self.down_proj = nn.Linear(expansion_ratio * d_model, d_model, device=device)
- self.down_proj._is_residual = True
-
- def forward(self, x):
- return self.down_proj(self.act(self.up_proj(x)))
-
-class MPTBlock(nn.Module):
-
- def __init__(self, d_model: int, n_heads: int, expansion_ratio: int, attn_config: Dict={'attn_type': 'multihead_attention', 'attn_pdrop': 0.0, 'attn_impl': 'triton', 'qk_ln': False, 'clip_qkv': None, 'softmax_scale': None, 'prefix_lm': False, 'attn_uses_sequence_id': False, 'alibi': False, 'alibi_bias_max': 8}, resid_pdrop: float=0.0, norm_type: str='low_precision_layernorm', verbose: int=0, device: Optional[str]=None, **kwargs):
- del kwargs
- super().__init__()
- norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
- attn_class = ATTN_CLASS_REGISTRY[attn_config['attn_type']]
- self.norm_1 = norm_class(d_model, device=device)
- self.attn = attn_class(attn_impl=attn_config['attn_impl'], clip_qkv=attn_config['clip_qkv'], qk_ln=attn_config['qk_ln'], softmax_scale=attn_config['softmax_scale'], attn_pdrop=attn_config['attn_pdrop'], d_model=d_model, n_heads=n_heads, verbose=verbose, device=device)
- self.norm_2 = norm_class(d_model, device=device)
- self.ffn = MPTMLP(d_model=d_model, expansion_ratio=expansion_ratio, device=device)
- self.resid_attn_dropout = nn.Dropout(resid_pdrop)
- self.resid_ffn_dropout = nn.Dropout(resid_pdrop)
-
- def forward(self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]]=None, attn_bias: Optional[torch.Tensor]=None, attention_mask: Optional[torch.ByteTensor]=None, is_causal: bool=True) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
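-        # Pre-norm residual block: LayerNorm -> self-attention -> residual add, then LayerNorm -> MLP -> residual add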
- a = self.norm_1(x)
- (b, attn_weights, past_key_value) = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=is_causal)
- x = x + self.resid_attn_dropout(b)
- m = self.norm_2(x)
- n = self.ffn(m)
- x = x + self.resid_ffn_dropout(n)
- return (x, attn_weights, past_key_value)
\ No newline at end of file
diff --git a/spaces/balenireekshana/MyGenAI/README.md b/spaces/balenireekshana/MyGenAI/README.md
deleted file mode 100644
index 2a80ca361ce1f001762ed04de0500e4b880b9fda..0000000000000000000000000000000000000000
--- a/spaces/balenireekshana/MyGenAI/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: MyGenAI
-emoji: 👁
-colorFrom: blue
-colorTo: purple
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/libs/draco/draco_encoder.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/libs/draco/draco_encoder.js
deleted file mode 100644
index 2ace437282e9beeb7712143820fffa1937b2a41a..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/libs/draco/draco_encoder.js
+++ /dev/null
@@ -1,33 +0,0 @@
-var DracoEncoderModule = function(DracoEncoderModule) {
- DracoEncoderModule = DracoEncoderModule || {};
-
-// [Minified Emscripten-generated runtime and base64 memory initializer omitted: the extracted text is garbled and truncated beyond recovery]
9uU2NoZW1lRGF0YUlOU18yNE1lc2hBdHRyaWJ1dGVDb3JuZXJUYWJsZUVFRUVFAE41ZHJhY280ME1lc2hQcmVkaWN0aW9uU2NoZW1lUGFyYWxsZWxvZ3JhbUVuY29kZXJJaU5TXzYyUHJlZGljdGlvblNjaGVtZU5vcm1hbE9jdGFoZWRyb25DYW5vbmljYWxpemVkRW5jb2RpbmdUcmFuc2Zvcm1JaUVFTlNfMjRNZXNoUHJlZGljdGlvblNjaGVtZURhdGFJTlNfMjRNZXNoQXR0cmlidXRlQ29ybmVyVGFibGVFRUVFRQBONWRyYWNvMzJTZXF1ZW50aWFsTm9ybWFsQXR0cmlidXRlRW5jb2RlckUATjVkcmFjbzM4U2VxdWVudGlhbFF1YW50aXphdGlvbkF0dHJpYnV0ZUVuY29kZXJFAHByZWRpY3Rpb25fc2NoZW1lAE41ZHJhY28xMUVuY29kZXJCYXNlSU5TXzE4RW5jb2Rlck9wdGlvbnNCYXNlSWlFRUVFAE41ZHJhY283RW5jb2RlckUAIABONWRyYWNvMTNFeHBlcnRFbmNvZGVyRQBlbmNvZGluZ19tZXRob2QAcXVhbnRpemF0aW9uX2JpdHMASW52YWxpZCBlbmNvZGluZyBtZXRob2QuAGVuY29kaW5nX3NwZWVkAGRlY29kaW5nX3NwZWVkAHF1YW50aXphdGlvbl9vcmlnaW4AcXVhbnRpemF0aW9uX3JhbmdlAHN5bWJvbF9lbmNvZGluZ19tZXRob2QAc3ltYm9sX2VuY29kaW5nX2NvbXByZXNzaW9uX2xldmVsAHN0YW5kYXJkX2VkZ2VicmVha2VyAHByZWRpY3RpdmVfZWRnZWJyZWFrZXIAZWRnZWJyZWFrZXJfbWV0aG9kAE41ZHJhY28yMk1lc2hFZGdlYnJlYWtlckVuY29kZXJFAEFsbCB0cmlhbmdsZXMgYXJlIGRlZ2VuZXJhdGUuAEZhaWxlZCB0byBwcm9jZXNzIG1lc2ggaG9sZXMuAEZhaWxlZCB0byBpbml0aWFsaXplIGF0dHJpYnV0ZSBkYXRhLgBGYWlsZWQgdG8gZW5jb2RlIG1lc2ggY29tcG9uZW50LgBGYWlsZWQgdG8gZW5jb2RlIHNwbGl0IGRhdGEuAE41ZHJhY28xM1RyYXZlcnNlckJhc2VJTlNfMjRNZXNoQXR0cmlidXRlQ29ybmVyVGFibGVFTlNfMzZNZXNoQXR0cmlidXRlSW5kaWNlc0VuY29kaW5nT2JzZXJ2ZXJJUzFfRUVFRQBONWRyYWNvMTlEZXB0aEZpcnN0VHJhdmVyc2VySU5TXzI0TWVzaEF0dHJpYnV0ZUNvcm5lclRhYmxlRU5TXzM2TWVzaEF0dHJpYnV0ZUluZGljZXNFbmNvZGluZ09ic2VydmVySVMxX0VFRUUATjVkcmFjbzIyTWVzaFRyYXZlcnNhbFNlcXVlbmNlcklOU18xOURlcHRoRmlyc3RUcmF2ZXJzZXJJTlNfMjRNZXNoQXR0cmlidXRlQ29ybmVyVGFibGVFTlNfMzZNZXNoQXR0cmlidXRlSW5kaWNlc0VuY29kaW5nT2JzZXJ2ZXJJUzJfRUVFRUVFAE41ZHJhY28xNVBvaW50c1NlcXVlbmNlckUATjVkcmFjbzEzVHJhdmVyc2VyQmFzZUlOU18xMUNvcm5lclRhYmxlRU5TXzM2TWVzaEF0dHJpYnV0ZUluZGljZXNFbmNvZGluZ09ic2VydmVySVMxX0VFRUUATjVkcmFjbzE5RGVwdGhGaXJzdFRyYXZlcnNlcklOU18xMUNvcm5lclRhYmxlRU5TXzM2TWVzaEF0dHJpYnV0ZUluZGljZXNFbmNvZGluZ09ic2VydmVySVMxX0VFRUUATjVkcmFjbzIyTWVzaFRyYXZlcnNhbFNlcXVlbmNlcklOU18xOURlcHRoRmlyc3RUcmF2ZXJzZXJJTlNfMTFDb3JuZXJUYWJsZUVOU18zNk1lc2hBdHRyaWJ1dGVJbmRpY2VzRW5jb2RpbmdPYnNlcnZlcklTMl9FRUVFRUUATjVkcmFjbzI4TWF4UHJlZGljdGlvbkRlZ3JlZVRyYXZlcnNlcklOU18xMUNvcm5lclRhYmxlRU5TXzM2TWVzaEF0dHJpYnV0ZUluZGljZXNFbmNvZGluZ09ic2VydmVySVMxX0VFRUUATjVkcmFjbzIyTWVzaFRyYXZlcnNhbFNlcXVlbmNlcklOU18yOE1heFByZWRpY3Rpb25EZWdyZWVUcmF2ZXJzZXJJTlNfMTFDb3JuZXJUYWJsZUVOU18zNk1lc2hBdHRyaWJ1dGVJbmRpY2VzRW5jb2RpbmdPYnNlcnZlcklTMl9FRUVFRUUAc3BsaXRfbWVzaF9vbl9zZWFtcwBONWRyYWNvMjZNZXNoRWRnZWJyZWFrZXJFbmNvZGVySW1wbElOU18zMU1lc2hFZGdlYnJlYWtlclRyYXZlcnNhbEVuY29kZXJFRUUATjVkcmFjbzM1TWVzaEVkZ2VicmVha2VyRW5jb2RlckltcGxJbnRlcmZhY2VFAE41ZHJhY28yNk1lc2hFZGdlYnJlYWtlckVuY29kZXJJbXBsSU5TXzM4TWVzaEVkZ2VicmVha2VyVHJhdmVyc2FsVmFsZW5jZUVuY29kZXJFRUUAc3RvcmVfbnVtYmVyX29mX2VuY29kZWRfZmFjZXMATjVkcmFjbzExTWVzaEVuY29kZXJFAGNvbXByZXNzX2Nvbm5lY3Rpdml0eQBONWRyYWNvMTVMaW5lYXJTZXF1ZW5jZXJFAE41ZHJhY28yMU1lc2hTZXF1ZW50aWFsRW5jb2RlckUATjVkcmFjbzE3UG9pbnRDbG91ZEVuY29kZXJFAEludmFsaWQgaW5wdXQgZ2VvbWV0cnkuAEZhaWxlZCB0byBpbml0aWFsaXplIGVuY29kZXIuAEZhaWxlZCB0byBlbmNvZGUgaW50ZXJuYWwgZGF0YS4ARmFpbGVkIHRvIGVuY29kZSBwb2ludCBhdHRyaWJ1dGVzLgBzdG9yZV9udW1iZXJfb2ZfZW5jb2RlZF9wb2ludHMARmFpbGVkIHRvIGVuY29kZSBtZXRhZGF0YS4ARFJBQ08ATjVkcmFjbzIzUG9pbnRDbG91ZEtkVHJlZUVuY29kZXJFAE41ZHJhY28yN1BvaW50Q2xvdWRTZXF1ZW50aWFsRW5jb2RlckUATjVkcmFjbzRNZXNoRQBhbGxvY2F0b3I8VD46OmFsbG9jYXRlKHNpemVfdCBuKSAnbicgZXhjZWVkcyBtYXhpbXVtIHN1cHBvcnRlZCBzaXplAE41ZHJhY28xMFBvaW50Q2xvdWRFABEACgAREREAAAAABQAAAAAAAAkAAAAACwAAAAAAAAAAEQAPChEREQMKBwABEwkLCwAACQYLAAALAAYRAAAAERERAAAAAAAAAAAAAAAAAAAAAAsAAAAAAAAAABEACgoREREACgAAAgAJCwAAAAkACwAAC
wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAAAAAAAAAAAAMAAAAAAwAAAAACQwAAAAAAAwAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAADQAAAAQNAAAAAAkOAAAAAAAOAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAA8AAAAADwAAAAAJEAAAAAAAEAAAEAAAEgAAABISEgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASAAAAEhISAAAAAAAACQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACwAAAAAAAAAAAAAACgAAAAAKAAAAAAkLAAAAAAALAAALAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwAAAAAAAAAAAAAAAwAAAAADAAAAAAJDAAAAAAADAAADAAALSsgICAwWDB4AChudWxsKQAtMFgrMFggMFgtMHgrMHggMHgAaW5mAElORgBOQU4AMDEyMzQ1Njc4OUFCQ0RFRi4AVCEiGQ0BAgMRSxwMEAQLHRIeJ2hub3BxYiAFBg8TFBUaCBYHKCQXGAkKDhsfJSODgn0mKis8PT4/Q0dKTVhZWltcXV5fYGFjZGVmZ2lqa2xyc3R5ent8AElsbGVnYWwgYnl0ZSBzZXF1ZW5jZQBEb21haW4gZXJyb3IAUmVzdWx0IG5vdCByZXByZXNlbnRhYmxlAE5vdCBhIHR0eQBQZXJtaXNzaW9uIGRlbmllZABPcGVyYXRpb24gbm90IHBlcm1pdHRlZABObyBzdWNoIGZpbGUgb3IgZGlyZWN0b3J5AE5vIHN1Y2ggcHJvY2VzcwBGaWxlIGV4aXN0cwBWYWx1ZSB0b28gbGFyZ2UgZm9yIGRhdGEgdHlwZQBObyBzcGFjZSBsZWZ0IG9uIGRldmljZQBPdXQgb2YgbWVtb3J5AFJlc291cmNlIGJ1c3kASW50ZXJydXB0ZWQgc3lzdGVtIGNhbGwAUmVzb3VyY2UgdGVtcG9yYXJpbHkgdW5hdmFpbGFibGUASW52YWxpZCBzZWVrAENyb3NzLWRldmljZSBsaW5rAFJlYWQtb25seSBmaWxlIHN5c3RlbQBEaXJlY3Rvcnkgbm90IGVtcHR5AENvbm5lY3Rpb24gcmVzZXQgYnkgcGVlcgBPcGVyYXRpb24gdGltZWQgb3V0AENvbm5lY3Rpb24gcmVmdXNlZABIb3N0IGlzIGRvd24ASG9zdCBpcyB1bnJlYWNoYWJsZQBBZGRyZXNzIGluIHVzZQBCcm9rZW4gcGlwZQBJL08gZXJyb3IATm8gc3VjaCBkZXZpY2Ugb3IgYWRkcmVzcwBCbG9jayBkZXZpY2UgcmVxdWlyZWQATm8gc3VjaCBkZXZpY2UATm90IGEgZGlyZWN0b3J5AElzIGEgZGlyZWN0b3J5AFRleHQgZmlsZSBidXN5AEV4ZWMgZm9ybWF0IGVycm9yAEludmFsaWQgYXJndW1lbnQAQXJndW1lbnQgbGlzdCB0b28gbG9uZwBTeW1ib2xpYyBsaW5rIGxvb3AARmlsZW5hbWUgdG9vIGxvbmcAVG9vIG1hbnkgb3BlbiBmaWxlcyBpbiBzeXN0ZW0ATm8gZmlsZSBkZXNjcmlwdG9ycyBhdmFpbGFibGUAQmFkIGZpbGUgZGVzY3JpcHRvcgBObyBjaGlsZCBwcm9jZXNzAEJhZCBhZGRyZXNzAEZpbGUgdG9vIGxhcmdlAFRvbyBtYW55IGxpbmtzAE5vIGxvY2tzIGF2YWlsYWJsZQBSZXNvdXJjZSBkZWFkbG9jayB3b3VsZCBvY2N1cgBTdGF0ZSBub3QgcmVjb3ZlcmFibGUAUHJldmlvdXMgb3duZXIgZGllZABPcGVyYXRpb24gY2FuY2VsZWQARnVuY3Rpb24gbm90IGltcGxlbWVudGVkAE5vIG1lc3NhZ2Ugb2YgZGVzaXJlZCB0eXBlAElkZW50aWZpZXIgcmVtb3ZlZABEZXZpY2Ugbm90IGEgc3RyZWFtAE5vIGRhdGEgYXZhaWxhYmxlAERldmljZSB0aW1lb3V0AE91dCBvZiBzdHJlYW1zIHJlc291cmNlcwBMaW5rIGhhcyBiZWVuIHNldmVyZWQAUHJvdG9jb2wgZXJyb3IAQmFkIG1lc3NhZ2UARmlsZSBkZXNjcmlwdG9yIGluIGJhZCBzdGF0ZQBOb3QgYSBzb2NrZXQARGVzdGluYXRpb24gYWRkcmVzcyByZXF1aXJlZABNZXNzYWdlIHRvbyBsYXJnZQBQcm90b2NvbCB3cm9uZyB0eXBlIGZvciBzb2NrZXQAUHJvdG9jb2wgbm90IGF2YWlsYWJsZQBQcm90b2NvbCBub3Qgc3VwcG9ydGVkAFNvY2tldCB0eXBlIG5vdCBzdXBwb3J0ZWQATm90IHN1cHBvcnRlZABQcm90b2NvbCBmYW1pbHkgbm90IHN1cHBvcnRlZABBZGRyZXNzIGZhbWlseSBub3Qgc3VwcG9ydGVkIGJ5IHByb3RvY29sAEFkZHJlc3Mgbm90IGF2YWlsYWJsZQBOZXR3b3JrIGlzIGRvd24ATmV0d29yayB1bnJlYWNoYWJsZQBDb25uZWN0aW9uIHJlc2V0IGJ5IG5ldHdvcmsAQ29ubmVjdGlvbiBhYm9ydGVkAE5vIGJ1ZmZlciBzcGFjZSBhdmFpbGFibGUAU29ja2V0IGlzIGNvbm5lY3RlZABTb2NrZXQgbm90IGNvbm5lY3RlZABDYW5ub3Qgc2VuZCBhZnRlciBzb2NrZXQgc2h1dGRvd24AT3BlcmF0aW9uIGFscmVhZHkgaW4gcHJvZ3Jlc3MAT3BlcmF0aW9uIGluIHByb2dyZXNzAFN0YWxlIGZpbGUgaGFuZGxlAFJlbW90ZSBJL08gZXJyb3IAUXVvdGEgZXhjZWVkZWQATm8gbWVkaXVtIGZvdW5kAFdyb25nIG1lZGl1bSB0eXBlAE5vIGVycm9yIGluZm9ybWF0aW9uAABpbmZpbml0eQBuYW4AJWQAJWYAdGVybWluYXRpbmcgd2l0aCAlcyBleGNlcHRpb24gb2YgdHlwZSAlczogJXMAdGVybWluYXRpbmcgd2l0aCAlcyBleGNlcHRpb24gb2YgdHlwZSAlcwB0ZXJtaW5hdGluZyB3aXRoICVzIGZvcmVpZ24gZXhjZXB0aW9uAHRlcm1pbmF0aW5nAHVuY2F1Z2h0AFN0OWV4Y2VwdGlvbgBOMTBfX2N4eGFiaXYxMTZfX3NoaW1fdHlwZV9pbmZvRQBTdDl0eXBlX2luZm8ATjEwX19jeHhhYml2MTIwX19zaV9jbGFzc190eXBlX2luZm9FAE4xMF9fY3h4YWJpdjExN19fY2xhc3NfdHlwZV9pbmZvRQBwdGhyZWFkX29uY2UgZmFpbHVyZSBpbiBfX2N4YV9nZXRfZ2xvYmFsc19mYXN0KCkAY2Fubm90IGNyZWF0ZSBw
dGhyZWFkIGtleSBmb3IgX19jeGFfZ2V0X2dsb2JhbHMoKQBjYW5ub3QgemVybyBvdXQgdGhyZWFkIHZhbHVlIGZvciBfX2N4YV9nZXRfZ2xvYmFscygpAHRlcm1pbmF0ZV9oYW5kbGVyIHVuZXhwZWN0ZWRseSByZXR1cm5lZABTdDExbG9naWNfZXJyb3IAU3QxMmxlbmd0aF9lcnJvcgBOMTBfX2N4eGFiaXYxMTlfX3BvaW50ZXJfdHlwZV9pbmZvRQBOMTBfX2N4eGFiaXYxMTdfX3BiYXNlX3R5cGVfaW5mb0U=";var tempDoublePtr=STATICTOP;STATICTOP+=16;function ___cxa_allocate_exception(size){return _malloc(size)}function __ZSt18uncaught_exceptionv(){return!!__ZSt18uncaught_exceptionv.uncaught_exception}var EXCEPTIONS={last:0,caught:[],infos:{},deAdjust:(function(adjusted){if(!adjusted||EXCEPTIONS.infos[adjusted])return adjusted;for(var ptr in EXCEPTIONS.infos){var info=EXCEPTIONS.infos[ptr];if(info.adjusted===adjusted){return ptr}}return adjusted}),addRef:(function(ptr){if(!ptr)return;var info=EXCEPTIONS.infos[ptr];info.refcount++}),decRef:(function(ptr){if(!ptr)return;var info=EXCEPTIONS.infos[ptr];assert(info.refcount>0);info.refcount--;if(info.refcount===0&&!info.rethrown){if(info.destructor){Module["dynCall_vi"](info.destructor,ptr)}delete EXCEPTIONS.infos[ptr];___cxa_free_exception(ptr)}}),clearRef:(function(ptr){if(!ptr)return;var info=EXCEPTIONS.infos[ptr];info.refcount=0})};function ___cxa_begin_catch(ptr){var info=EXCEPTIONS.infos[ptr];if(info&&!info.caught){info.caught=true;__ZSt18uncaught_exceptionv.uncaught_exception--}if(info)info.rethrown=false;EXCEPTIONS.caught.push(ptr);EXCEPTIONS.addRef(EXCEPTIONS.deAdjust(ptr));return ptr}function ___cxa_pure_virtual(){ABORT=true;throw"Pure virtual function called!"}function ___resumeException(ptr){if(!EXCEPTIONS.last){EXCEPTIONS.last=ptr}throw ptr+" - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch."}function ___cxa_find_matching_catch(){var thrown=EXCEPTIONS.last;if(!thrown){return(setTempRet0(0),0)|0}var info=EXCEPTIONS.infos[thrown];var throwntype=info.type;if(!throwntype){return(setTempRet0(0),thrown)|0}var typeArray=Array.prototype.slice.call(arguments);var pointer=Module["___cxa_is_pointer_type"](throwntype);if(!___cxa_find_matching_catch.buffer)___cxa_find_matching_catch.buffer=_malloc(4);HEAP32[___cxa_find_matching_catch.buffer>>2]=thrown;thrown=___cxa_find_matching_catch.buffer;for(var i=0;i>2];info.adjusted=thrown;return(setTempRet0(typeArray[i]),thrown)|0}}thrown=HEAP32[thrown>>2];return(setTempRet0(throwntype),thrown)|0}function ___cxa_throw(ptr,type,destructor){EXCEPTIONS.infos[ptr]={ptr:ptr,adjusted:ptr,type:type,destructor:destructor,refcount:0,caught:false,rethrown:false};EXCEPTIONS.last=ptr;if(!("uncaught_exception"in __ZSt18uncaught_exceptionv)){__ZSt18uncaught_exceptionv.uncaught_exception=1}else{__ZSt18uncaught_exceptionv.uncaught_exception++}throw ptr+" - Exception catching is disabled, this exception cannot be caught. 
Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch."}var cttz_i8=allocate([8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0],"i8",ALLOC_STATIC);function ___gxx_personality_v0(){}var SYSCALLS={varargs:0,get:(function(varargs){SYSCALLS.varargs+=4;var ret=HEAP32[SYSCALLS.varargs-4>>2];return ret}),getStr:(function(){var ret=Pointer_stringify(SYSCALLS.get());return ret}),get64:(function(){var low=SYSCALLS.get(),high=SYSCALLS.get();if(low>=0)assert(high===0);else assert(high===-1);return low}),getZero:(function(){assert(SYSCALLS.get()===0)})};function ___syscall140(which,varargs){SYSCALLS.varargs=varargs;try{var stream=SYSCALLS.getStreamFromFD(),offset_high=SYSCALLS.get(),offset_low=SYSCALLS.get(),result=SYSCALLS.get(),whence=SYSCALLS.get();var offset=offset_low;FS.llseek(stream,offset,whence);HEAP32[result>>2]=stream.position;if(stream.getdents&&offset===0&&whence===0)stream.getdents=null;return 0}catch(e){if(typeof FS==="undefined"||!(e instanceof FS.ErrnoError))abort(e);return-e.errno}}function flush_NO_FILESYSTEM(){var fflush=Module["_fflush"];if(fflush)fflush(0);var printChar=___syscall146.printChar;if(!printChar)return;var buffers=___syscall146.buffers;if(buffers[1].length)printChar(1,10);if(buffers[2].length)printChar(2,10)}function ___syscall146(which,varargs){SYSCALLS.varargs=varargs;try{var stream=SYSCALLS.get(),iov=SYSCALLS.get(),iovcnt=SYSCALLS.get();var ret=0;if(!___syscall146.buffers){___syscall146.buffers=[null,[],[]];___syscall146.printChar=(function(stream,curr){var buffer=___syscall146.buffers[stream];assert(buffer);if(curr===0||curr===10){(stream===1?Module["print"]:Module["printErr"])(UTF8ArrayToString(buffer,0));buffer.length=0}else{buffer.push(curr)}})}for(var i=0;i>2];var len=HEAP32[iov+(i*8+4)>>2];for(var j=0;j>2]=PTHREAD_SPECIFIC_NEXT_KEY;PTHREAD_SPECIFIC[PTHREAD_SPECIFIC_NEXT_KEY]=0;PTHREAD_SPECIFIC_NEXT_KEY++;return 0}function _pthread_once(ptr,func){if(!_pthread_once.seen)_pthread_once.seen={};if(ptr in _pthread_once.seen)return;Module["dynCall_v"](func);_pthread_once.seen[ptr]=1}function _pthread_setspecific(key,value){if(!(key in PTHREAD_SPECIFIC)){return ERRNO_CODES.EINVAL}PTHREAD_SPECIFIC[key]=value;return 0}function ___setErrNo(value){if(Module["___errno_location"])HEAP32[Module["___errno_location"]()>>2]=value;return value}DYNAMICTOP_PTR=staticAlloc(4);STACK_BASE=STACKTOP=alignMemory(STATICTOP);STACK_MAX=STACK_BASE+TOTAL_STACK;DYNAMIC_BASE=alignMemory(STACK_MAX);HEAP32[DYNAMICTOP_PTR>>2]=DYNAMIC_BASE;staticSealed=true;var ASSERTIONS=false;function intArrayFromString(stringy,dontAddNull,length){var len=length>0?length:lengthBytesUTF8(stringy)+1;var u8array=new Array(len);var numBytesWritten=stringToUTF8Array(stringy,u8array,0,u8array.length);if(dontAddNull)u8array.length=numBytesWritten;return u8array}function intArrayToString(array){var ret=[];for(var i=0;i255){if(ASSERTIONS){assert(false,"Character code "+chr+" ("+String.fromCharCode(chr)+") at offset "+i+" not in 0x00-0xFF.")}chr&=255}ret.push(String.fromCharCode(chr))}return ret.join("")}var decodeBase64=typeof 
atob==="function"?atob:(function(input){var keyStr="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";var output="";var chr1,chr2,chr3;var enc1,enc2,enc3,enc4;var i=0;input=input.replace(/[^A-Za-z0-9\+\/\=]/g,"");do{enc1=keyStr.indexOf(input.charAt(i++));enc2=keyStr.indexOf(input.charAt(i++));enc3=keyStr.indexOf(input.charAt(i++));enc4=keyStr.indexOf(input.charAt(i++));chr1=enc1<<2|enc2>>4;chr2=(enc2&15)<<4|enc3>>2;chr3=(enc3&3)<<6|enc4;output=output+String.fromCharCode(chr1);if(enc3!==64){output=output+String.fromCharCode(chr2)}if(enc4!==64){output=output+String.fromCharCode(chr3)}}while(i2147483648)return false;b=new a(newBuffer);d=new c(newBuffer);f=new e(newBuffer);h=new g(newBuffer);j=new i(newBuffer);l=new k(newBuffer);n=new m(newBuffer);p=new o(newBuffer);buffer=newBuffer;return true}
-// EMSCRIPTEN_START_FUNCS
-// [asm.js function bodies of the compiled encoder followed in the original file (the excerpt opens with wc, a minified malloc/free routine); truncated during extraction and omitted here.]